+ source /setup_export_logs.sh
++ CLICKHOUSE_CI_LOGS_CREDENTIALS=/tmp/export-logs-config.sh
++ CLICKHOUSE_CI_LOGS_USER=ci
++ CLICKHOUSE_CI_LOGS_CLUSTER=system_logs_export
++ EXTRA_COLUMNS='pull_request_number UInt32, commit_sha String, check_start_time DateTime('\''UTC'\''), check_name LowCardinality(String), instance_type LowCardinality(String), instance_id String, INDEX ix_pr (pull_request_number) TYPE set(100), INDEX ix_commit (commit_sha) TYPE set(100), INDEX ix_check_time (check_start_time) TYPE minmax, '
++ EXTRA_COLUMNS_EXPRESSION='CAST(0 AS UInt32) AS pull_request_number, '\'''\'' AS commit_sha, now() AS check_start_time, toLowCardinality('\'''\'') AS check_name, toLowCardinality('\'''\'') AS instance_type, '\'''\'' AS instance_id'
++ EXTRA_ORDER_BY_COLUMNS=check_name
++ EXTRA_COLUMNS_TRACE_LOG='pull_request_number UInt32, commit_sha String, check_start_time DateTime('\''UTC'\''), check_name LowCardinality(String), instance_type LowCardinality(String), instance_id String, INDEX ix_pr (pull_request_number) TYPE set(100), INDEX ix_commit (commit_sha) TYPE set(100), INDEX ix_check_time (check_start_time) TYPE minmax, symbols Array(LowCardinality(String)), lines Array(LowCardinality(String)), '
++ EXTRA_COLUMNS_EXPRESSION_TRACE_LOG='CAST(0 AS UInt32) AS pull_request_number, '\'''\'' AS commit_sha, now() AS check_start_time, toLowCardinality('\'''\'') AS check_name, toLowCardinality('\'''\'') AS instance_type, '\'''\'' AS instance_id, arrayMap(x -> demangle(addressToSymbol(x)), trace)::Array(LowCardinality(String)) AS symbols, arrayMap(x -> addressToLine(x), trace)::Array(LowCardinality(String)) AS lines'
++ EXTRA_COLUMNS_COVERAGE_LOG='pull_request_number UInt32, commit_sha String, check_start_time DateTime('\''UTC'\''), check_name LowCardinality(String), instance_type LowCardinality(String), instance_id String, INDEX ix_pr (pull_request_number) TYPE set(100), INDEX ix_commit (commit_sha) TYPE set(100), INDEX ix_check_time (check_start_time) TYPE minmax, symbols Array(LowCardinality(String)), '
++ EXTRA_COLUMNS_EXPRESSION_COVERAGE_LOG='CAST(0 AS UInt32) AS pull_request_number, '\'''\'' AS commit_sha, now() AS check_start_time, toLowCardinality('\'''\'') AS check_name, toLowCardinality('\'''\'') AS instance_type, '\'''\'' AS instance_id, arrayDistinct(arrayMap(x -> demangle(addressToSymbol(x)), coverage))::Array(LowCardinality(String)) AS symbols'
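For reference, the EXTRA_COLUMNS_EXPRESSION defaults above just fill the CI metadata columns with placeholder values when no pull-request context is set. A minimal standalone sketch (not part of the harness) that evaluates the same expressions with clickhouse-local:

# Sketch only: evaluate the default EXTRA_COLUMNS_EXPRESSION values by hand
clickhouse-local --query "
    SELECT
        CAST(0 AS UInt32) AS pull_request_number,
        '' AS commit_sha,
        now() AS check_start_time,
        toLowCardinality('') AS check_name,
        toLowCardinality('') AS instance_type,
        '' AS instance_id
    FORMAT Vertical"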
+ source /repo/tests/docker_scripts/stress_tests.lib
++ OK='\tOK\t\N\t'
++ FAIL='\tFAIL\t\N\t'
++ FAILURE_CONTEXT_LINES=100
++ FAILURE_CONTEXT_MAX_LINE_WIDTH=300
+ dmesg --clear
+ set -e -x -a
+ USE_DATABASE_REPLICATED=0
+ USE_SHARED_CATALOG=0
++ rg -v '#' /usr/share/zoneinfo/zone.tab
++ awk '{print $3}'
++ shuf
++ head -n1
+ TZ=Atlantic/Azores
+ echo 'Chosen random timezone Atlantic/Azores'
+ ln -snf /usr/share/zoneinfo/Atlantic/Azores /etc/localtime
Chosen random timezone Atlantic/Azores
+ echo Atlantic/Azores
+ dpkg -i package_folder/clickhouse-common-static_24.12.2.20221.altinityantalya+msan_amd64.deb
Selecting previously unselected package clickhouse-common-static.
(Reading database ... 48425 files and directories currently installed.)
Preparing to unpack .../clickhouse-common-static_24.12.2.20221.altinityantalya+msan_amd64.deb ...
Unpacking clickhouse-common-static (24.12.2.20221.altinityantalya+msan) ...
Setting up clickhouse-common-static (24.12.2.20221.altinityantalya+msan) ...
+ dpkg -i package_folder/clickhouse-common-static-dbg_24.12.2.20221.altinityantalya+msan_amd64.deb
Selecting previously unselected package clickhouse-common-static-dbg.
(Reading database ... 48452 files and directories currently installed.)
Preparing to unpack .../clickhouse-common-static-dbg_24.12.2.20221.altinityantalya+msan_amd64.deb ...
Unpacking clickhouse-common-static-dbg (24.12.2.20221.altinityantalya+msan) ...
Setting up clickhouse-common-static-dbg (24.12.2.20221.altinityantalya+msan) ...
+ dpkg -i package_folder/clickhouse-odbc-bridge_24.12.2.20221.altinityantalya+msan_amd64.deb
Selecting previously unselected package clickhouse-odbc-bridge.
(Reading database ... 48459 files and directories currently installed.)
Preparing to unpack .../clickhouse-odbc-bridge_24.12.2.20221.altinityantalya+msan_amd64.deb ...
Unpacking clickhouse-odbc-bridge (24.12.2.20221.altinityantalya+msan) ...
Setting up clickhouse-odbc-bridge (24.12.2.20221.altinityantalya+msan) ...
+ dpkg -i package_folder/clickhouse-library-bridge_24.12.2.20221.altinityantalya+msan_amd64.deb
Selecting previously unselected package clickhouse-library-bridge.
(Reading database ... 48465 files and directories currently installed.)
Preparing to unpack .../clickhouse-library-bridge_24.12.2.20221.altinityantalya+msan_amd64.deb ...
Unpacking clickhouse-library-bridge (24.12.2.20221.altinityantalya+msan) ...
Setting up clickhouse-library-bridge (24.12.2.20221.altinityantalya+msan) ...
+ dpkg -i package_folder/clickhouse-server_24.12.2.20221.altinityantalya+msan_amd64.deb
Selecting previously unselected package clickhouse-server.
(Reading database ... 48471 files and directories currently installed.)
Preparing to unpack .../clickhouse-server_24.12.2.20221.altinityantalya+msan_amd64.deb ...
Unpacking clickhouse-server (24.12.2.20221.altinityantalya+msan) ...
Setting up clickhouse-server (24.12.2.20221.altinityantalya+msan) ...
ClickHouse binary is already located at /usr/bin/clickhouse
Symlink /usr/bin/clickhouse-server already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-server to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-client to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-local to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-benchmark to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-obfuscator to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-git-import to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-compressor to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-format to /usr/bin/clickhouse.
Symlink /usr/bin/clickhouse-extract-from-config already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-extract-from-config to /usr/bin/clickhouse.
Symlink /usr/bin/clickhouse-keeper already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-keeper to /usr/bin/clickhouse.
Symlink /usr/bin/clickhouse-keeper-converter already exists but it points to /clickhouse. Will replace the old symlink to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-keeper-converter to /usr/bin/clickhouse.
Creating symlink /usr/bin/clickhouse-disks to /usr/bin/clickhouse.
Creating symlink /usr/bin/ch to /usr/bin/clickhouse.
Creating symlink /usr/bin/chl to /usr/bin/clickhouse.
Creating symlink /usr/bin/chc to /usr/bin/clickhouse.
Creating clickhouse group if it does not exist.
groupadd -r clickhouse
Creating clickhouse user if it does not exist.
useradd -r --shell /bin/false --home-dir /nonexistent -g clickhouse clickhouse
Will set ulimits for clickhouse user in /etc/security/limits.d/clickhouse.conf.
Creating config directory /etc/clickhouse-server/config.d that is used for tweaks of main server configuration.
Creating config directory /etc/clickhouse-server/users.d that is used for tweaks of users configuration.
Config file /etc/clickhouse-server/config.xml already exists, will keep it and extract path info from it.
/etc/clickhouse-server/config.xml has /var/lib/clickhouse/ as data path.
/etc/clickhouse-server/config.xml has /var/log/clickhouse-server/ as log path.
Users config file /etc/clickhouse-server/users.xml already exists, will keep it and extract users info from it.
Log directory /var/log/clickhouse-server/ already exists.
Creating data directory /var/lib/clickhouse/.
Creating pid directory /var/run/clickhouse-server.
chown -R clickhouse:clickhouse '/var/log/clickhouse-server/'
chown -R clickhouse:clickhouse '/var/run/clickhouse-server'
chown clickhouse:clickhouse '/var/lib/clickhouse/'
groupadd -r clickhouse-bridge
useradd -r --shell /bin/false --home-dir /nonexistent -g clickhouse-bridge clickhouse-bridge
chown -R clickhouse-bridge:clickhouse-bridge '/usr/bin/clickhouse-odbc-bridge'
chown -R clickhouse-bridge:clickhouse-bridge '/usr/bin/clickhouse-library-bridge'
Password for the default user is an empty string. See /etc/clickhouse-server/users.xml and /etc/clickhouse-server/users.d to change it.
Setting capabilities for clickhouse binary. This is optional.
chown -R clickhouse:clickhouse '/etc/clickhouse-server'
ClickHouse has been successfully installed.
Start clickhouse-server with:
sudo clickhouse start
Start clickhouse-client with:
clickhouse-client
+ dpkg -i package_folder/clickhouse-client_24.12.2.20221.altinityantalya+msan_amd64.deb
Selecting previously unselected package clickhouse-client.
(Reading database ... 48488 files and directories currently installed.)
Preparing to unpack .../clickhouse-client_24.12.2.20221.altinityantalya+msan_amd64.deb ...
Unpacking clickhouse-client (24.12.2.20221.altinityantalya+msan) ...
Setting up clickhouse-client (24.12.2.20221.altinityantalya+msan) ...
+ echo ''
+ [[ -z '' ]]
+ ch --query 'SELECT 1'
1
+ chl --query 'SELECT 1'
1
+ chc --version
ClickHouse client version 24.12.2.20221.altinityantalya (altinity build).
+ ln -sf /repo/tests/clickhouse-test /usr/bin/clickhouse-test
+ export CLICKHOUSE_GRPC_CLIENT=/repo/utils/grpc-client/clickhouse-grpc-client.py
+ CLICKHOUSE_GRPC_CLIENT=/repo/utils/grpc-client/clickhouse-grpc-client.py
+ source /repo/tests/docker_scripts/attach_gdb.lib
++ source /repo/tests/docker_scripts/utils.lib
+++ sysctl kernel.core_pattern=core.%e.%p-%P
kernel.core_pattern = core.%e.%p-%P
+++ sysctl fs.suid_dumpable=1
fs.suid_dumpable = 1
+ source /repo/tests/docker_scripts/utils.lib
++ sysctl kernel.core_pattern=core.%e.%p-%P
kernel.core_pattern = core.%e.%p-%P
++ sysctl fs.suid_dumpable=1
fs.suid_dumpable = 1
+ /repo/tests/config/install.sh
+ DEST_SERVER_PATH=/etc/clickhouse-server
+ DEST_CLIENT_PATH=/etc/clickhouse-client
+++ dirname /repo/tests/config/install.sh
++ cd /repo/tests/config
++ pwd -P
+ SRC_PATH=/repo/tests/config
+ '[' 0 -ge 2 ']'
+ FAST_TEST=0
+ NO_AZURE=0
+ [[ 0 -gt 0 ]]
+ echo 'Going to install test configs from /repo/tests/config into /etc/clickhouse-server'
+ mkdir -p /etc/clickhouse-server/config.d/
Going to install test configs from /repo/tests/config into /etc/clickhouse-server
+ mkdir -p /etc/clickhouse-server/users.d/
+ mkdir -p /etc/clickhouse-client
+ ln -sf /repo/tests/config/config.d/zookeeper_write.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/max_num_to_warn.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/listen.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/text_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/blob_storage_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/database_catalog_drop_table_concurrency.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/enable_access_control_improvements.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/macros.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/secure_ports.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/clusters.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/graphite.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/graphite_alternative.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/grpc_protocol.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/database_atomic.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/max_concurrent_queries.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/merge_tree_settings.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/backoff_failed_mutation.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/merge_tree_old_dirs_cleanup.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/test_cluster_with_incorrect_pw.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/keeper_port.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/logging_no_rotate.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/merge_tree.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/lost_forever_check.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/tcp_with_proxy.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/prometheus.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/top_level_domains_lists.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/top_level_domains_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/transactions.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/encryption.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/CORS.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/zookeeper_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/logger_trace.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/named_collection.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/ssl_certs.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/filesystem_cache_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/session_log.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/system_unfreeze.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/enable_zero_copy_replication.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/nlp.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/forbidden_headers.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/enable_keeper_map.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/custom_disks_base_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/display_name.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/compressed_marks_and_index.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/disable_s3_env_credentials.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/enable_wait_for_shutdown_replicated_tables.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/backups.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/filesystem_caches_path.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/validate_tcp_client_information.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/zero_copy_destructive_operations.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/block_number.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/handlers.yaml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/serverwide_trace_collector.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/rocksdb.xml /etc/clickhouse-server/config.d/
+ '[' 0 '!=' 1 ']'
+ ln -sf /repo/tests/config/config.d/legacy_geobase.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/users.d/log_queries.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/readonly.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/access_management.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/database_atomic_drop_detach_sync.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/opentelemetry.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/remote_queries.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/session_log_test.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/memory_profiler.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/no_fsync_metadata.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/filelog.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/enable_blobs_check.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/marks.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/insert_keeper_retries.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/prefetch_settings.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/nonconst_timezone.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/allow_introspection_functions.yaml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/replicated_ddl_entry.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/limits.yaml /etc/clickhouse-server/users.d/
+ [[ -n '' ]]
+ ln -sf /repo/tests/config/users.d/timeouts.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/ints_dictionary.xml /etc/clickhouse-server/
+ ln -sf /repo/tests/config/strings_dictionary.xml /etc/clickhouse-server/
+ ln -sf /repo/tests/config/decimals_dictionary.xml /etc/clickhouse-server/
+ ln -sf /repo/tests/config/executable_dictionary.xml /etc/clickhouse-server/
+ ln -sf /repo/tests/config/executable_pool_dictionary.xml /etc/clickhouse-server/
+ ln -sf /repo/tests/config/test_function.xml /etc/clickhouse-server/
+ ln -sf /repo/tests/config/top_level_domains /etc/clickhouse-server/
+ ln -sf /repo/tests/config/regions_hierarchy.txt /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/regions_names_en.txt /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/regions_names_es.txt /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/ext-en.txt /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/ext-ru.txt /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/lem-en.bin /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/server.key /etc/clickhouse-server/
+ ln -sf /repo/tests/config/server.crt /etc/clickhouse-server/
+ ln -sf /repo/tests/config/dhparam.pem /etc/clickhouse-server/
+ ln -sf --backup=simple --suffix=_original.xml /repo/tests/config/config.d/query_masking_rules.xml /etc/clickhouse-server/config.d/
+ [[ -n '' ]]
+ rm -f /etc/clickhouse-server/config.d/zookeeper_fault_injection.xml
+ ln -sf /repo/tests/config/config.d/zookeeper.xml /etc/clickhouse-server/config.d/
+ [[ -n '' ]]
+ rm -f /etc/clickhouse-server/config.d/cannot_allocate_thread_injection.xml
+ value=0
+ sed --follow-symlinks -i 's|[01]|0|' /etc/clickhouse-server/config.d/keeper_port.xml
+ value=57333760
+ sed --follow-symlinks -i 's|[[:digit:]]\+|57333760|' /etc/clickhouse-server/config.d/keeper_port.xml
+ value=38379520
+ sed --follow-symlinks -i 's|[[:digit:]]\+|38379520|' /etc/clickhouse-server/config.d/keeper_port.xml
+ [[ -n '' ]]
+ [[ -n '' ]]
+ [[ '' == \1 ]]
+ [[ '' == \1 ]]
+ [[ 1 == \1 ]]
+ [[ 0 != \1 ]]
+ ln -sf /repo/tests/config/config.d/azure_storage_conf.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/storage_conf.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/storage_conf_02944.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/storage_conf_02963.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/config.d/storage_conf_02961.xml /etc/clickhouse-server/config.d/
+ ln -sf /repo/tests/config/users.d/s3_cache.xml /etc/clickhouse-server/users.d/
+ ln -sf /repo/tests/config/users.d/s3_cache_new.xml /etc/clickhouse-server/users.d/
+ [[ 0 == \1 ]]
+ ln -sf /repo/tests/config/client_config.xml /etc/clickhouse-client/config.xml
+ /repo/tests/docker_scripts/setup_minio.sh stateless
+ azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence
+ export MINIO_ROOT_USER=clickhouse
+ MINIO_ROOT_USER=clickhouse
+ export MINIO_ROOT_PASSWORD=clickhouse
+ MINIO_ROOT_PASSWORD=clickhouse
+ main stateless
+ local query_dir
++ check_arg stateless
++ local query_dir
++ '[' '!' 1 -eq 1 ']'
++ case "$1" in
++ query_dir=0_stateless
++ echo 0_stateless
+ query_dir=0_stateless
+ '[' '!' -f ./minio ']'
+ start_minio
+ mkdir -p ./minio_data
+ ./minio --version
minio version RELEASE.2024-08-03T04-33-23Z (commit-id=6efb56851c40da88d1ca15112e2d686a4ecec6b3)
Runtime: go1.22.5 linux/amd64
License: GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html
Copyright: 2015-2024 MinIO, Inc.
+ wait_for_it
+ local counter=0
+ local max_counter=60
+ local url=http://localhost:11111
+ ./minio server --address :11111 ./minio_data
+ params=('--silent' '--verbose')
+ local params
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
+ [[ 0 == \6\0 ]]
+ echo 'trying to connect to minio'
+ sleep 1
trying to connect to minio
(node:260) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead.
(Use `node --trace-deprecation ...` to show where the warning was created)
In-memory extent storage is enabled with a limit of 15667.21 MB (16428259328 bytes, 50% of total memory).
Azurite Blob service is starting on 0.0.0.0:10000
Azurite Blob service successfully listens on http://0.0.0.0:10000
INFO: Formatting 1st pool, 1 set(s), 1 drives per set.
INFO: WARNING: Host local has more than 0 drives of set. A host failure will result in data becoming unavailable.
MinIO Object Storage Server
Copyright: 2015-2025 MinIO, Inc.
License: GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html
Version: RELEASE.2024-08-03T04-33-23Z (go1.22.5 linux/amd64)
API: http://172.17.0.2:11111 http://127.0.0.1:11111
WebUI: http://172.17.0.2:40861 http://127.0.0.1:40861
Docs: https://min.io/docs/minio/linux/index.html
+ counter=1
+ curl --silent --verbose http://localhost:11111
+ grep AccessDenied
AccessDenied
Access Denied./18324FF80D7148427dc7eb22d3288ec80374614e9088e31d3668a6922ead55932dd2a8e56373820f
+ lsof -i :11111
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
minio 284 root 9u IPv4 29210 0t0 TCP localhost:11111 (LISTEN)
minio 284 root 10u IPv6 29211 0t0 TCP *:11111 (LISTEN)
minio 284 root 11u IPv6 29212 0t0 TCP localhost:11111 (LISTEN)
+ sleep 5
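The wait_for_it loop traced above polls MinIO until an anonymous request is rejected with AccessDenied, which signals that the server is listening. A minimal sketch of that loop, assuming the same endpoint and the 60-attempt budget visible in the trace:

# Sketch of the readiness loop (assumes http://localhost:11111 and 60 retries)
counter=0
until curl --silent http://localhost:11111 | grep -q AccessDenied; do
    if [ "$counter" -ge 60 ]; then
        echo 'Failed to wait for minio to start' >&2
        exit 1
    fi
    echo 'trying to connect to minio'
    counter=$((counter + 1))
    sleep 1
done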
+ setup_minio stateless
+ local test_type=stateless
+ ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
Added `clickminio` successfully.
+ ./mc admin user add clickminio test testtest
Added user `test` successfully.
+ ./mc admin policy attach clickminio readwrite --user=test
Attached Policies: [readwrite]
To User: test
+ ./mc mb --ignore-existing clickminio/test
Bucket created successfully `clickminio/test`.
+ '[' stateless = stateless ']'
+ ./mc anonymous set public clickminio/test
Access permission for `clickminio/test` is set to `public`
+ upload_data 0_stateless /repo/tests/
+ local query_dir=0_stateless
+ local test_path=/repo/tests/
+ local data_path=/repo/tests//queries/0_stateless/data_minio
+ '[' -d /repo/tests//queries/0_stateless/data_minio ']'
+ ./mc cp --recursive /repo/tests//queries/0_stateless/data_minio/ clickminio/test/
`/repo/tests/queries/0_stateless/data_minio/03036_archive1.tar` -> `clickminio/test/03036_archive1.tar`
`/repo/tests/queries/0_stateless/data_minio/02731.arrow` -> `clickminio/test/02731.arrow`
`/repo/tests/queries/0_stateless/data_minio/02366_data.jsonl` -> `clickminio/test/02366_data.jsonl`
`/repo/tests/queries/0_stateless/data_minio/02731.parquet` -> `clickminio/test/02731.parquet`
`/repo/tests/queries/0_stateless/data_minio/03036_archive1.zip` -> `clickminio/test/03036_archive1.zip`
`/repo/tests/queries/0_stateless/data_minio/02876.parquet` -> `clickminio/test/02876.parquet`
`/repo/tests/queries/0_stateless/data_minio/03036_archive2.tar` -> `clickminio/test/03036_archive2.tar`
`/repo/tests/queries/0_stateless/data_minio/03036_archive2.zip` -> `clickminio/test/03036_archive2.zip`
`/repo/tests/queries/0_stateless/data_minio/03036_archive3.tar.gz` -> `clickminio/test/03036_archive3.tar.gz`
`/repo/tests/queries/0_stateless/data_minio/03036_compressed_file_archive.zip` -> `clickminio/test/03036_compressed_file_archive.zip`
`/repo/tests/queries/0_stateless/data_minio/03036_json_archive.zip` -> `clickminio/test/03036_json_archive.zip`
`/repo/tests/queries/0_stateless/data_minio/a.tsv` -> `clickminio/test/a.tsv`
`/repo/tests/queries/0_stateless/data_minio/b.tsv` -> `clickminio/test/b.tsv`
`/repo/tests/queries/0_stateless/data_minio/c.tsv` -> `clickminio/test/c.tsv`
`/repo/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet` -> `clickminio/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet`
`/repo/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet` -> `clickminio/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet`
`/repo/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/sample.parquet` -> `clickminio/test/hive_partitioning/column0=Elizabeth/sample.parquet`
`/repo/tests/queries/0_stateless/data_minio/hive_partitioning/non_existing_column=Elizabeth/sample.parquet` -> `clickminio/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet`
`/repo/tests/queries/0_stateless/data_minio/json_data` -> `clickminio/test/json_data`
`/repo/tests/queries/0_stateless/data_minio/tsv_with_header.tsv` -> `clickminio/test/tsv_with_header.tsv`
Total: 5.42 MiB, Transferred: 5.42 MiB, Speed: 161.06 MiB/s
+ setup_aws_credentials
+ local minio_root_user=clickhouse
+ local minio_root_password=clickhouse
+ mkdir -p /root/.aws
+ cat
+ config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
+ set +x
File /tmp/export-logs-config.sh does not exist, do not setup
+ [[ -n '' ]]
+ export IS_FLAKY_CHECK=0
+ IS_FLAKY_CHECK=0
+ export NUM_TRIES
+ '[' 1 -gt 1 ']'
+ sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ sleep 1
+ for _ in {1..100}
+ clickhouse-client --query 'SELECT 1'
1
+ break
+ setup_logs_replication
+ set +x
File /tmp/export-logs-config.sh does not exist, do not setup
+ attach_gdb_to_clickhouse
++ run_with_retry 5 clickhouse-client --query 'SELECT count() FROM system.build_options WHERE name = '\''CXX_FLAGS'\'' AND position('\''sanitize=address'\'' IN value)'
++ [[ ahxB =~ e ]]
++ set_e=false
++ set +e
++ local total_retries=5
++ shift
++ local retry=0
++ '[' 0 -ge 5 ']'
++ clickhouse-client --query 'SELECT count() FROM system.build_options WHERE name = '\''CXX_FLAGS'\'' AND position('\''sanitize=address'\'' IN value)'
++ false
++ return
+ IS_ASAN=0
+ [[ 0 = \1 ]]
++ kill -l SIGRTMIN
+ RTMIN=34
+ echo '
set follow-fork-mode parent
handle SIGHUP nostop noprint pass
handle SIGINT nostop noprint pass
handle SIGQUIT nostop noprint pass
handle SIGPIPE nostop noprint pass
handle SIGTERM nostop noprint pass
handle SIGUSR1 nostop noprint pass
handle SIGUSR2 nostop noprint pass
handle SIG34 nostop noprint pass
info signals
continue
backtrace full
info registers
p top' 1 KiB of the 'stack:
p/x *(uint64_t[128]*)$sp
maintenance info sections
thread apply all backtrace full
disassemble /s
up
disassemble /s
up
disassemble /s
p "done"
detach
quit
'
+ sleep 5
+ ts '%Y-%m-%d %H:%M:%S'
++ cat /var/run/clickhouse-server/clickhouse-server.pid
+ gdb -batch -command script.gdb -p 414
aarch64-binfmt-P: Could not open '/lib/ld-linux-aarch64.so.1': No such file or directory
+ run_with_retry 60 clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
+ [[ aehxB =~ e ]]
+ set_e=true
+ set +e
+ local total_retries=60
+ shift
+ local retry=0
+ '[' 0 -ge 60 ']'
+ clickhouse-client --query 'SELECT '\''Connected to clickhouse-server after attaching gdb'\'''
Connected to clickhouse-server after attaching gdb
+ true
+ set -e
+ return
+ clickhouse-client --allow_experimental_json_type=1 --query 'CREATE TABLE minio_audit_logs
(
log JSON(time DateTime64(9))
)
ENGINE = MergeTree
ORDER BY tuple()'
+ clickhouse-client --allow_experimental_json_type=1 --query 'CREATE TABLE minio_server_logs
(
log JSON(time DateTime64(9))
)
ENGINE = MergeTree
ORDER BY tuple()'
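Both tables store each event as a single JSON value with a typed time path. A hypothetical follow-up query (not part of this run) showing how such a column can be inspected once MinIO starts shipping events; the log.api.name path is an assumption about the MinIO audit record layout, not something taken from the trace:

# Sketch only: the .api.name path is assumed, not shown in the log above
clickhouse-client --query "
    SELECT log.api.name AS api, count() AS events
    FROM minio_audit_logs
    GROUP BY api
    ORDER BY events DESC
    LIMIT 10"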
+ ./mc admin config set clickminio logger_webhook:ch_server_webhook 'endpoint=http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&date_time_input_format=best_effort&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20JSONAsObject' queue_size=1000000 batch_size=500
Successfully applied new settings.
+ ./mc admin config set clickminio audit_webhook:ch_audit_webhook 'endpoint=http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&date_time_input_format=best_effort&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20JSONAsObject' queue_size=1000000 batch_size=500
Successfully applied new settings.
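URL-decoded, both webhook endpoints are plain HTTP INSERTs into ClickHouse with asynchronous inserts enabled (INSERT INTO minio_server_logs / minio_audit_logs FORMAT JSONAsObject). Roughly, MinIO ends up POSTing each log record the way this hedged curl sketch does (single made-up event, default user assumed):

# Sketch: hand-made POST equivalent to the server-logs webhook endpoint
curl 'http://localhost:8123/?async_insert=1&wait_for_async_insert=0&date_time_input_format=best_effort&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20JSONAsObject' \
    --data-binary '{"time":"2025-04-01T21:45:00.000000000Z","message":"example"}'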
clickminio restart attempt 1:
+ max_retries=100
+ retry=1
+ '[' 1 -le 100 ']'
+ echo 'clickminio restart attempt 1:'
++ ./mc admin service restart clickminio --wait --json
++ jq -r .status
INFO: Restarting on service signal
MinIO Object Storage Server
Copyright: 2015-2025 MinIO, Inc.
License: GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html
Version: RELEASE.2024-08-03T04-33-23Z (go1.22.5 linux/amd64)
API: http://172.17.0.2:11111 http://127.0.0.1:11111
WebUI: http://172.17.0.2:39967 http://127.0.0.1:39967
Docs: https://min.io/docs/minio/linux/index.html
Output of restart status: success
success
Restarted clickminio successfully.
+ output='success
success'
+ echo 'Output of restart status: success
success'
+ expected_output='success
success'
+ '[' 'success
success' = 'success
success' ']'
+ echo 'Restarted clickminio successfully.'
+ break
+ '[' 1 -gt 100 ']'
+ MC_ADMIN_PID=1337
+ ./mc admin trace clickminio
+ export -f run_tests
+ '[' 1 -gt 1 ']'
+ run_tests
+ set -x
+ read -ra ADDITIONAL_OPTIONS
+ HIGH_LEVEL_COVERAGE=YES
+ '[' 1 -gt 1 ']'
+ [[ -n '' ]]
+ [[ -n '' ]]
+ [[ 0 -eq 1 ]]
+ [[ '' -eq 1 ]]
+ [[ 0 -eq 1 ]]
++ clickhouse-client --query 'SELECT value LIKE '\''%SANITIZE_COVERAGE%'\'' FROM system.build_options WHERE name = '\''CXX_FLAGS'\'''
+ [[ 1 == 0 ]]
+ ADDITIONAL_OPTIONS+=('--jobs')
+ ADDITIONAL_OPTIONS+=('8')
+ [[ -n 1 ]]
+ [[ -n 4 ]]
+ ADDITIONAL_OPTIONS+=('--run-by-hash-num')
+ ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_NUM")
+ ADDITIONAL_OPTIONS+=('--run-by-hash-total')
+ ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_TOTAL")
+ HIGH_LEVEL_COVERAGE=NO
+ [[ -n '' ]]
+ [[ NO = \Y\E\S ]]
+ ADDITIONAL_OPTIONS+=('--report-logs-stats')
+ try_run_with_retry 10 clickhouse-client -q 'insert into system.zookeeper (name, path, value) values ('\''auxiliary_zookeeper2'\'', '\''/test/chroot/'\'', '\'''\'')'
+ local total_retries=10
+ shift
+ fn_exists run_with_retry
+ declare -F run_with_retry
+ run_with_retry 10 clickhouse-client -q 'insert into system.zookeeper (name, path, value) values ('\''auxiliary_zookeeper2'\'', '\''/test/chroot/'\'', '\'''\'')'
+ [[ aehxB =~ e ]]
+ set_e=true
+ set +e
+ local total_retries=10
+ shift
+ local retry=0
+ '[' 0 -ge 10 ']'
+ clickhouse-client -q 'insert into system.zookeeper (name, path, value) values ('\''auxiliary_zookeeper2'\'', '\''/test/chroot/'\'', '\'''\'')'
+ true
+ set -e
+ return
+ set +e
+ TEST_ARGS=(--testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time --no-drop-if-fail --capture-client-stacktrace --queries "/repo/tests/queries" --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}")
+ clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check --print-time --jobs 8 --run-by-hash-num 1 --run-by-hash-total 4 --report-logs-stats
+ ts '%Y-%m-%d %H:%M:%S'
+ tee -a test_output/test_result.txt
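This runner only executes its share of the suite: --run-by-hash-num 1 --run-by-hash-total 4 keeps the tests whose name hashes into bucket 1 of 4. A rough bash illustration of that bucketing (the cksum hash here is illustrative only; clickhouse-test applies its own hash function to the test name):

# Illustration only: pick tests whose name-hash bucket matches this runner
RUN_BY_HASH_NUM=1
RUN_BY_HASH_TOTAL=4
for test_name in 00036_array_element 00098_5_union_all 02995_index_8; do
    bucket=$(( $(printf '%s' "$test_name" | cksum | cut -d' ' -f1) % RUN_BY_HASH_TOTAL ))
    [ "$bucket" -eq "$RUN_BY_HASH_NUM" ] && echo "this runner would pick $test_name"
done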
2025-04-01 21:45:58 Using queries from '/repo/tests/queries' directory
2025-04-01 21:45:58 Connecting to ClickHouse server... OK
2025-04-01 21:45:58 Connected to server 24.12.2.20221.altinityantalya @ 82252d159dc02cab0f366aaa5691adc1545dd11d HEAD
2025-04-01 21:45:59 Found 1718 parallel tests and 139 sequential tests
2025-04-01 21:46:00 Running about 214 stateless tests (Process-9).
2025-04-01 21:46:00 02923_explain_expired_context: [ OK ] 0.99 sec.
2025-04-01 21:46:00 Running about 214 stateless tests (Process-5).
2025-04-01 21:46:00 01825_new_type_json_10: [ OK ] 1.15 sec.
2025-04-01 21:46:00 Running about 214 stateless tests (Process-6).
2025-04-01 21:46:00 01548_lzy305: [ OK ] 1.30 sec.
2025-04-01 21:46:01 Running about 214 stateless tests (Process-8).
2025-04-01 21:46:01 01737_move_order_key_to_prewhere_select_final: [ OK ] 1.65 sec.
2025-04-01 21:46:01 02026_describe_include_subcolumns: [ OK ] 1.08 sec.
2025-04-01 21:46:02 Running about 214 stateless tests (Process-4).
2025-04-01 21:46:02 02995_new_settings_history: [ OK ] 2.94 sec.
2025-04-01 21:46:02 02689_meaningless_data_types: [ OK ] 2.29 sec.
2025-04-01 21:46:02 03210_lag_lead_inframe_types: [ OK ] 1.13 sec.
2025-04-01 21:46:02 03223_nested_json_in_shared_data_merges: [ OK ] 2.13 sec.
2025-04-01 21:46:03 00106_totals_after_having: [ OK ] 1.23 sec.
2025-04-01 21:46:03 02813_starting_in_text_log: [ OK ] 2.58 sec.
2025-04-01 21:46:03 00735_or_expr_optimize_bug: [ OK ] 0.98 sec.
2025-04-01 21:46:04 Running about 214 stateless tests (Process-3).
2025-04-01 21:46:04 02147_arrow_duplicate_columns: [ OK ] 4.64 sec.
2025-04-01 21:46:04 03229_empty_tuple_in_array: [ OK ] 0.78 sec.
2025-04-01 21:46:04 02185_split_by_char: [ OK ] 0.88 sec.
2025-04-01 21:46:04 01114_materialize_clear_index_compact_parts: [ OK ] 1.89 sec.
2025-04-01 21:46:05 01852_jit_if: [ OK ] 1.13 sec.
2025-04-01 21:46:05 01811_datename: [ OK ] 1.68 sec.
2025-04-01 21:46:05 01515_with_global_and_with_propagation: [ OK ] 1.18 sec.
2025-04-01 21:46:05 03035_argMinMax_numeric_non_extreme_bug: [ OK ] 1.04 sec.
2025-04-01 21:46:06 01252_weird_time_zone: [ OK ] 3.34 sec.
2025-04-01 21:46:06 Running about 214 stateless tests (Process-7).
2025-04-01 21:46:06 02513_parquet_orc_arrow_nullable_schema_inference: [ OK ] 6.81 sec.
2025-04-01 21:46:06 03202_enum_json_cast: [ OK ] 1.08 sec.
2025-04-01 21:46:06 01632_group_array_msan: [ OK ] 1.88 sec.
2025-04-01 21:46:07 02366_direct_dictionary_dict_has: [ OK ] 1.23 sec.
2025-04-01 21:46:07 00148_summing_merge_tree_nested_map_multiple_values: [ OK ] 1.38 sec.
2025-04-01 21:46:07 00412_logical_expressions_optimizer: [ OK ] 0.93 sec.
2025-04-01 21:46:08 00995_optimize_read_in_order_with_aggregation: [ OK ] 0.98 sec.
2025-04-01 21:46:08 01943_non_deterministic_order_key: [ OK ] 1.59 sec.
2025-04-01 21:46:08 02128_wait_end_of_query_fix: [ OK ] 1.88 sec.
2025-04-01 21:46:09 00718_format_datetime_1: [ OK ] 1.08 sec.
2025-04-01 21:46:09 02534_s3_heap_use_after_free: [ OK ] 0.93 sec.
2025-04-01 21:46:10 01825_type_json_order_by: [ OK ] 1.08 sec.
2025-04-01 21:46:11 03035_internal_functions_direct_call: [ OK ] 6.05 sec.
2025-04-01 21:46:11 02116_tuple_element_analyzer: [ OK ] 5.29 sec.
2025-04-01 21:46:12 01507_multiversion_storage_for_storagememory: [ OK ] 0.93 sec.
2025-04-01 21:46:12 00501_http_head: [ OK ] 2.10 sec.
2025-04-01 21:46:13 01621_decode_XML: [ OK ] 1.13 sec.
2025-04-01 21:46:13 02122_parallel_formatting_RowBinary: [ OK ] 7.30 sec.
2025-04-01 21:46:13 01890_cross_join_explain_crash: [ OK ] 0.93 sec.
2025-04-01 21:46:13 01146_clickhouse_local_data: [ OK ] 5.49 sec.
2025-04-01 21:46:13 01920_not_chain_format: [ OK ] 0.88 sec.
2025-04-01 21:46:14 00126_buffer: [ OK ] 2.09 sec.
2025-04-01 21:46:15 02552_inner_join_with_where_true: [ OK ] 0.98 sec.
2025-04-01 21:46:17 00965_send_logs_level_concurrent_queries: [ OK ] 3.56 sec.
2025-04-01 21:46:17 02429_groupBitmap_chain_state: [ OK ] 3.54 sec.
2025-04-01 21:46:18 02947_parallel_replicas_remote: [ OK ] 2.64 sec.
2025-04-01 21:46:18 02158_contingency: [ OK ] 1.24 sec.
2025-04-01 21:46:18 02862_uuid_reinterpret_as_numeric: [ OK ] 1.14 sec.
2025-04-01 21:46:19 03131_hilbert_coding: [ OK ] 5.77 sec.
2025-04-01 21:46:19 00660_optimize_final_without_partition: [ OK ] 1.34 sec.
2025-04-01 21:46:20 02733_fix_distinct_in_order_bug_49622: [ OK ] 0.94 sec.
2025-04-01 21:46:21 01536_fuzz_cast: [ OK ] 1.33 sec.
2025-04-01 21:46:24 01579_date_datetime_index_comparison: [ OK ] 2.14 sec.
2025-04-01 21:46:25 03224_invalid_alter: [ OK ] 11.43 sec.
2025-04-01 21:46:27 02771_semi_join_use_nulls: [ OK ] 17.20 sec.
2025-04-01 21:46:28 01268_shard_avgweighted: [ OK ] 2.70 sec.
2025-04-01 21:46:32 00096_aggregation_min_if: [ OK ] 13.50 sec.
2025-04-01 21:46:32 02995_index_8: [ SKIPPED ] 0.00 sec.
2025-04-01 21:46:32 Reason: not running for current build
2025-04-01 21:46:34 03227_distinct_dynamic_types_json_paths: [ OK ] 26.92 sec.
2025-04-01 21:46:36 03006_parallel_replicas_cte_explain_syntax_crash: [ OK ] 1.95 sec.
2025-04-01 21:46:36 02377_majority_insert_quorum_zookeeper_long: [ OK ] 12.30 sec.
2025-04-01 21:46:36 00926_adaptive_index_granularity_versioned_collapsing_merge_tree: [ OK ] 9.53 sec.
2025-04-01 21:46:38 02352_lightweight_delete_and_object_column: [ OK ] 2.23 sec.
2025-04-01 21:46:38 02509_h3_arguments: [ OK ] 1.58 sec.
2025-04-01 21:46:39 02244_casewithexpression_return_type: [ OK ] 1.22 sec.
2025-04-01 21:46:40 00461_default_value_of_argument_type: [ OK ] 1.76 sec.
2025-04-01 21:46:46 03254_trivial_merge_selector: [ OK ] 6.77 sec.
2025-04-01 21:46:47 03055_analyzer_subquery_group_array: [ OK ] 0.99 sec.
2025-04-01 21:46:49 02481_aggregation_in_order_plan: [ OK ] 1.49 sec.
2025-04-01 21:46:49 02122_parallel_formatting_CSVWithNames: [ OK ] 8.96 sec.
2025-04-01 21:46:50 01087_window_view_alter_query: [ OK ] 13.49 sec.
2025-04-01 21:46:50 02534_default_granularity: [ OK ] 0.99 sec.
2025-04-01 21:46:51 03255_fix_sbstrings_logical_error: [ OK ] 0.87 sec.
2025-04-01 21:46:52 02478_projection_with_group_by_alter: [ OK ] 2.39 sec.
2025-04-01 21:46:53 01583_const_column_in_set_index: [ OK ] 1.19 sec.
2025-04-01 21:46:53 00938_ipv6_cidr_range: [ OK ] 3.44 sec.
2025-04-01 21:46:54 02703_keeper_map_concurrent_create_drop: [ OK ] 35.45 sec.
2025-04-01 21:46:54 02994_cosineDistanceNullable: [ OK ] 1.10 sec.
2025-04-01 21:46:54 02597_column_update_and_replication: [ OK ] 3.14 sec.
2025-04-01 21:46:55 03197_fix_parse_mysql_iso_date: [ OK ] 1.12 sec.
2025-04-01 21:46:55 02123_MySQLWire_regression: [ OK ] 1.16 sec.
2025-04-01 21:46:55 02180_insert_into_values_settings: [ OK ] 0.99 sec.
2025-04-01 21:46:56 03250_json_group_by_sub_object_subcolumn: [ OK ] 1.21 sec.
2025-04-01 21:46:56 03035_morton_encode_no_rows: [ OK ] 1.04 sec.
2025-04-01 21:46:57 01533_distinct_nullable_uuid: [ OK ] 1.85 sec.
2025-04-01 21:46:57 00500_point_in_polygon_bug: [ OK ] 1.14 sec.
2025-04-01 21:46:57 02496_from_unixtime_in_joda_syntax: [ OK ] 3.09 sec.
2025-04-01 21:46:58 01016_input_null_as_default: [ OK ] 29.61 sec.
2025-04-01 21:46:58 03204_index_hint_fuzzer: [ OK ] 0.94 sec.
2025-04-01 21:46:59 03143_prewhere_profile_events: [ OK ] 26.67 sec.
2025-04-01 21:46:59 01213_alter_rename_primary_key_zookeeper_long: [ OK ] 2.53 sec.
2025-04-01 21:46:59 00364_java_style_denormals: [ OK ] 0.84 sec.
2025-04-01 21:46:59 03203_function_printf: [ OK ] 2.54 sec.
2025-04-01 21:46:59 03236_squashing_high_memory: [ SKIPPED ] 0.00 sec.
2025-04-01 21:46:59 Reason: not running for current build
2025-04-01 21:47:00 02315_grouping_constant_folding: [ OK ] 1.24 sec.
2025-04-01 21:47:01 03205_hashing_empty_tuples: [ OK ] 1.49 sec.
2025-04-01 21:47:01 02287_type_object_convert: [ OK ] 1.64 sec.
2025-04-01 21:47:01 01764_collapsing_merge_adaptive_granularity: [ OK ] 1.49 sec.
2025-04-01 21:47:01 00290_shard_aggregation_memory_efficient: [ OK ] 4.09 sec.
2025-04-01 21:47:02 01513_ilike_like_cache: [ OK ] 1.04 sec.
2025-04-01 21:47:02 03267_materialized_view_keeps_security_context: [ OK ] 1.09 sec.
2025-04-01 21:47:02 01319_optimize_skip_unused_shards_nesting: [ OK ] 2.04 sec.
2025-04-01 21:47:02 01062_alter_on_mutataion_zookeeper_long: [ OK ] 4.14 sec.
2025-04-01 21:47:03 02021_exponential_sum: [ OK ] 1.64 sec.
2025-04-01 21:47:03 00017_in_subquery_with_empty_result: [ OK ] 0.84 sec.
2025-04-01 21:47:03 02158_interval_length_sum: [ OK ] 0.89 sec.
2025-04-01 21:47:03 02834_alter_exception: [ OK ] 1.29 sec.
2025-04-01 21:47:04 02007_test_any_all_operators: [ OK ] 1.84 sec.
2025-04-01 21:47:04 01373_summing_merge_tree_explicit_columns_definition: [ OK ] 1.44 sec.
2025-04-01 21:47:04 01745_alter_delete_view: [ OK ] 1.54 sec.
2025-04-01 21:47:05 00184_shard_distributed_group_by_no_merge: [ OK ] 3.24 sec.
2025-04-01 21:47:05 02987_logical_optimizer_pass_lowcardinality: [ OK ] 1.09 sec.
2025-04-01 21:47:05 00255_array_concat_string: [ OK ] 1.94 sec.
2025-04-01 21:47:06 03032_string_to_variant_cast: [ OK ] 2.04 sec.
2025-04-01 21:47:07 01263_type_conversion_nvartolomei: [ OK ] 2.49 sec.
2025-04-01 21:47:07 00137_in_constants: [ OK ] 2.04 sec.
2025-04-01 21:47:07 02512_array_join_name_resolution: [ OK ] 0.99 sec.
2025-04-01 21:47:08 01746_convert_type_with_default: [ OK ] 2.74 sec.
2025-04-01 21:47:08 01051_random_printable_ascii: [ OK ] 0.94 sec.
2025-04-01 21:47:08 02715_bit_operations_float: [ OK ] 5.05 sec.
2025-04-01 21:47:08 00988_expansion_aliases_limit: [ OK ] 1.29 sec.
2025-04-01 21:47:09 03290_pr_non_replicated_in_subquery: [ OK ] 1.15 sec.
2025-04-01 21:47:09 02293_compatibility_ignore_auto_increment_in_create_table: [ OK ] 1.79 sec.
2025-04-01 21:47:10 02500_prevent_drop_nested_if_empty_part: [ OK ] 2.19 sec.
2025-04-01 21:47:10 02780_final_streams_data_skipping_index: [ OK ] 1.89 sec.
2025-04-01 21:47:11 02521_aggregation_by_partitions: [ OK ] 51.93 sec.
2025-04-01 21:47:11 02866_size_of_marks_skip_idx_explain: [ OK ] 0.99 sec.
2025-04-01 21:47:12 02869_insert_filenames_collisions: [ OK ] 2.59 sec.
2025-04-01 21:47:12 02122_parallel_formatting_Template: [ OK ] 7.10 sec.
2025-04-01 21:47:12 02861_replacing_merge_tree_with_cleanup: [ OK ] 1.34 sec.
2025-04-01 21:47:12 02179_sparse_columns_detach: [ OK ] 1.94 sec.
2025-04-01 21:47:13 01260_ubsan_decimal_parse: [ OK ] 0.93 sec.
2025-04-01 21:47:14 00703_join_crash: [ OK ] 1.19 sec.
2025-04-01 21:47:15 02770_async_buffer_ignore: [ OK ] 6.05 sec.
2025-04-01 21:47:15 03156_dynamic_type_concurrent_inserts: [ OK ] 4.49 sec.
2025-04-01 21:47:16 03271_benchmark_metrics: [ OK ] 8.16 sec.
2025-04-01 21:47:17 02916_joinget_dependency: [ OK ] 4.85 sec.
2025-04-01 21:47:17 01016_simhash_minhash_ppc: [ SKIPPED ] 0.00 sec.
2025-04-01 21:47:17 Reason: not running for current build
2025-04-01 21:47:17 01047_window_view_parser_inner_table: [ OK ] 3.50 sec.
2025-04-01 21:47:17 03209_parallel_replicas_order_by_all: [ OK ] 1.34 sec.
2025-04-01 21:47:18 Running about 214 stateless tests (Process-10).
2025-04-01 21:47:18 02530_dictionaries_update_field: [ OK ] 78.60 sec.
2025-04-01 21:47:18 02015_async_inserts_6: [ OK ] 5.10 sec.
2025-04-01 21:47:19 02469_fix_aliases_parser: [ OK ] 0.95 sec.
2025-04-01 21:47:19 00175_partition_by_ignore: [ OK ] 1.10 sec.
2025-04-01 21:47:19 01721_constraints_constant_expressions: [ OK ] 2.24 sec.
2025-04-01 21:47:20 01478_not_equi-join_on: [ OK ] 1.09 sec.
2025-04-01 21:47:21 02575_merge_prewhere_different_default_kind: [ OK ] 2.00 sec.
2025-04-01 21:47:21 02725_async_insert_table_setting: [ OK ] 5.76 sec.
2025-04-01 21:47:22 01509_parallel_quorum_insert_no_replicas_long: [ OK ] 3.50 sec.
2025-04-01 21:47:23 01236_graphite_mt: [ OK ] 1.89 sec.
2025-04-01 21:47:23 02882_clickhouse_keeper_client_no_confirmation: [ OK ] 2.60 sec.
2025-04-01 21:47:24 02481_low_cardinality_with_short_circuit_functins_mutations: [ OK ] 1.44 sec.
2025-04-01 21:47:24 03170_part_offset_as_table_column: [ OK ] 1.16 sec.
2025-04-01 21:47:25 02000_join_on_const: [ OK ] 8.00 sec.
2025-04-01 21:47:26 01889_key_condition_function_chains: [ OK ] 1.74 sec.
2025-04-01 21:47:27 02816_s2_invalid_point: [ OK ] 1.49 sec.
2025-04-01 21:47:28 01055_minmax_index_compact_parts: [ OK ] 8.66 sec.
2025-04-01 21:47:28 02430_bitmap_transform_exception_code: [ OK ] 1.55 sec.
2025-04-01 21:47:28 01546_log_queries_min_query_duration_ms: [ OK ] 4.99 sec.
2025-04-01 21:47:29 01069_set_in_group_by: [ OK ] 1.04 sec.
2025-04-01 21:47:30 01279_dist_group_by: [ OK ] 1.29 sec.
2025-04-01 21:47:30 02371_analyzer_join_cross: [ OK ] 2.80 sec.
2025-04-01 21:47:31 02901_predicate_pushdown_cte_stateful: [ OK ] 1.04 sec.
2025-04-01 21:47:31 00214_primary_key_order: [ OK ] 1.24 sec.
2025-04-01 21:47:31 02896_union_distinct_http_format: [ OK ] 2.04 sec.
2025-04-01 21:47:32 03008_groupSortedArray_field: [ OK ] 0.99 sec.
2025-04-01 21:47:33 03201_local_named_collections: [ OK ] 4.51 sec.
2025-04-01 21:47:33 00577_replacing_merge_tree_vertical_merge: [ OK ] 1.85 sec.
2025-04-01 21:47:33 02962_analyzer_const_in_count_distinct: [ OK ] 0.99 sec.
2025-04-01 21:47:34 00905_field_with_aggregate_function_state: [ OK ] 1.09 sec.
2025-04-01 21:47:34 00720_combinations_of_aggregate_combinators: [ OK ] 1.14 sec.
2025-04-01 21:47:34 00974_bitmapContains_with_primary_key: [ OK ] 1.24 sec.
2025-04-01 21:47:35 01503_if_const_optimization: [ OK ] 0.89 sec.
2025-04-01 21:47:35 02834_remote_session_log: [ OK ] 23.15 sec.
2025-04-01 21:47:36 02861_index_set_incorrect_args: [ OK ] 1.84 sec.
2025-04-01 21:47:36 02243_arrow_read_null_type_to_nullable_column: [ OK ] 5.75 sec.
2025-04-01 21:47:37 00549_join_use_nulls: [ OK ] 1.10 sec.
2025-04-01 21:47:37 01443_merge_truncate_long: [ OK ] 22.00 sec.
2025-04-01 21:47:38 02315_replace_multiif_to_if: [ OK ] 0.94 sec.
2025-04-01 21:47:39 00701_rollup: [ OK ] 1.60 sec.
2025-04-01 21:47:41 01332_join_type_syntax_position: [ OK ] 2.24 sec.
2025-04-01 21:47:42 03008_s3_plain_rewritable: [ OK ] 18.40 sec.
2025-04-01 21:47:42 01299_alter_merge_tree: [ OK ] 1.34 sec.
2025-04-01 21:47:42 00699_materialized_view_mutations: [ OK ] 6.91 sec.
2025-04-01 21:47:43 00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere: [ OK ] 23.08 sec.
2025-04-01 21:47:43 00624_length_utf8: [ OK ] 0.89 sec.
2025-04-01 21:47:43 02662_sparse_columns_mutations_5: [ OK ] 1.24 sec.
2025-04-01 21:47:43 00190_non_constant_array_of_constant_data: [ OK ] 1.34 sec.
2025-04-01 21:47:44 01056_create_table_as: [ OK ] 4.60 sec.
2025-04-01 21:47:44 00852_any_join_nulls: [ OK ] 1.19 sec.
2025-04-01 21:47:45 02542_case_no_else: [ OK ] 1.09 sec.
2025-04-01 21:47:45 02990_variant_where_cond: [ OK ] 1.79 sec.
2025-04-01 21:47:45 01457_min_index_granularity_bytes_setting: [ OK ] 1.84 sec.
2025-04-01 21:47:46 00479_date_and_datetime_to_number: [ OK ] 0.99 sec.
2025-04-01 21:47:46 01210_drop_view: [ OK ] 0.94 sec.
2025-04-01 21:47:46 02997_insert_select_too_many_parts_multithread: [ SKIPPED ] 0.00 sec.
2025-04-01 21:47:46 Reason: disabled
2025-04-01 21:47:47 00751_default_databasename_for_view: [ OK ] 1.64 sec.
2025-04-01 21:47:47 00961_visit_param_buffer_underflow: [ OK ] 0.94 sec.
2025-04-01 21:47:48 01709_inactive_parts_to_throw_insert: [ OK ] 1.84 sec.
2025-04-01 21:47:49 00557_alter_null_storage_tables: [ OK ] 0.99 sec.
2025-04-01 21:47:49 01888_read_int_safe: [ OK ] 6.36 sec.
2025-04-01 21:47:50 01434_netloc_fuzz: [ OK ] 0.89 sec.
2025-04-01 21:47:50 02771_parallel_replicas_analyzer: [ OK ] 3.10 sec.
2025-04-01 21:47:51 03127_window_functions_uint16: [ OK ] 1.44 sec.
2025-04-01 21:47:51 01044_great_circle_angle: [ OK ] 1.04 sec.
2025-04-01 21:47:52 00233_position_function_family: [ OK ] 17.63 sec.
2025-04-01 21:47:52 00270_views_query_processing_stage: [ OK ] 1.09 sec.
2025-04-01 21:47:52 03240_cte_in_subquery: [ OK ] 1.24 sec.
2025-04-01 21:47:53 03023_analyzer_optimize_group_by_function_keys_with_nulls: [ OK ] 0.94 sec.
2025-04-01 21:47:53 01815_with_mergeable_state_after_aggregation_and_limit: [ OK ] 2.69 sec.
2025-04-01 21:47:53 00825_protobuf_format_table_default: [ OK ] 6.15 sec.
2025-04-01 21:47:54 01867_support_datetime64_version_column: [ OK ] 1.39 sec.
2025-04-01 21:47:54 02047_alias_for_table_and_database_name: [ OK ] 0.94 sec.
2025-04-01 21:47:55 01659_array_aggregation_ubsan: [ OK ] 0.90 sec.
2025-04-01 21:47:56 00715_json_each_row_input_nested: [ OK ] 12.27 sec.
2025-04-01 21:47:57 02438_sync_replica_lightweight: [ OK ] 3.75 sec.
2025-04-01 21:47:57 02719_aggregate_with_empty_string_key: [ OK ] 0.94 sec.
2025-04-01 21:47:57 01698_map_populate_overflow: [ OK ] 1.74 sec.
2025-04-01 21:48:00 02805_distributed_queries_timeouts: [ OK ] 24.95 sec.
2025-04-01 21:48:00 01293_client_interactive_vertical_multiline: [ OK ] 6.01 sec.
2025-04-01 21:48:01 02896_illegal_sampling: [ OK ] 1.19 sec.
2025-04-01 21:48:02 01582_distinct_optimization: [ OK ] 5.11 sec.
2025-04-01 21:48:02 00900_parquet_time_to_ch_date_time: [ OK ] 5.15 sec.
2025-04-01 21:48:02 02153_clickhouse_local_profile_info: [ OK ] 2.44 sec.
2025-04-01 21:48:03 02808_aliases_inside_case: [ OK ] 0.89 sec.
2025-04-01 21:48:03 01053_window_view_proc_hop_to_now: [ OK ] 10.81 sec.
2025-04-01 21:48:04 02393_every_metric_must_have_documentation: [ OK ] 0.94 sec.
2025-04-01 21:48:04 03096_update_non_indexed_columns: [ OK ] 1.59 sec.
2025-04-01 21:48:04 01070_template_empty_file: [ OK ] 1.04 sec.
2025-04-01 21:48:05 02236_explain_pipeline_join: [ OK ] 0.99 sec.
2025-04-01 21:48:05 03201_sumIf_to_countIf_return_type: [ OK ] 0.99 sec.
2025-04-01 21:48:06 02286_quantile_tdigest_infinity: [ OK ] 5.35 sec.
2025-04-01 21:48:07 03050_select_one_one_one: [ OK ] 0.95 sec.
2025-04-01 21:48:09 00670_truncate_temporary_table: [ OK ] 1.20 sec.
2025-04-01 21:48:11 02706_kolmogorov_smirnov_test_scipy: [ OK ] 18.64 sec.
2025-04-01 21:48:12 02282_array_distance: [ OK ] 7.81 sec.
2025-04-01 21:48:12 02346_inverted_index_experimental_flag: [ OK ] 3.35 sec.
2025-04-01 21:48:13 01560_monotonicity_check_multiple_args_bug: [ OK ] 1.05 sec.
2025-04-01 21:48:13 01913_summing_mt_and_simple_agg_function_with_lc: [ OK ] 1.25 sec.
2025-04-01 21:48:13 00838_system_tables_drop_table_race: [ OK ] 10.97 sec.
2025-04-01 21:48:14 02001_select_with_filter: [ OK ] 1.09 sec.
2025-04-01 21:48:14 01916_low_cardinality_interval: [ OK ] 1.04 sec.
2025-04-01 21:48:15 01245_distributed_group_by_no_merge_with-extremes_and_totals: [ OK ] 9.72 sec.
2025-04-01 21:48:16 01031_new_any_join: [ OK ] 2.25 sec.
2025-04-01 21:48:17 01441_low_cardinality_array_index: [ OK ] 19.75 sec.
2025-04-01 21:48:18 02865_array_join_with_max_block_size: [ OK ] 3.85 sec.
2025-04-01 21:48:20 02346_additional_filters: [ OK ] 4.85 sec.
2025-04-01 21:48:20 02417_repeat_input_commands: [ OK ] 3.05 sec.
2025-04-01 21:48:20 03271_dynamic_variant_in_min_max: [ OK ] 5.71 sec.
2025-04-01 21:48:20 01062_max_parser_depth: [ OK ] 2.04 sec.
2025-04-01 21:48:21 02004_intersect_except_const_column: [ OK ] 1.49 sec.
2025-04-01 21:48:21 00780_unaligned_array_join: [ OK ] 0.94 sec.
2025-04-01 21:48:21 02293_part_log_has_merge_reason: [ OK ] 9.92 sec.
2025-04-01 21:48:22 02281_limit_by_distributed: [ OK ] 1.29 sec.
2025-04-01 21:48:22 01137_order_by_func: [ OK ] 17.23 sec.
2025-04-01 21:48:23 03256_merges: [ OK ] 1.84 sec.
2025-04-01 21:48:23 02482_load_parts_refcounts: [ OK ] 7.48 sec.
2025-04-01 21:48:23 02205_ephemeral_1: [ OK ] 1.89 sec.
2025-04-01 21:48:24 01570_aggregator_combinator_simple_state: [ OK ] 2.35 sec.
2025-04-01 21:48:24 02893_system_drop_schema_cache_format: [ OK ] 0.74 sec.
2025-04-01 21:48:24 01355_defaultValueOfArgumentType_bug: [ OK ] 0.94 sec.
2025-04-01 21:48:25 00098_5_union_all: [ OK ] 1.24 sec.
2025-04-01 21:48:25 00174_compare_date_time_with_constant_string_in_in: [ OK ] 0.99 sec.
2025-04-01 21:48:26 02302_column_decl_null_before_defaul_value: [ OK ] 2.24 sec.
2025-04-01 21:48:26 01278_alter_rename_combination: [ OK ] 1.54 sec.
2025-04-01 21:48:26 02813_system_licenses_base: [ OK ] 0.89 sec.
2025-04-01 21:48:27 01710_projection_materialize_with_missing_columns: [ OK ] 1.24 sec.
2025-04-01 21:48:27 01710_projection_with_joins: [ OK ] 1.75 sec.
2025-04-01 21:48:28 02515_distinct_zero_size_key_bug_44831: [ OK ] 0.79 sec.
2025-04-01 21:48:28 03036_test_parquet_bloom_filter_push_down_ipv6: [ OK ] 7.96 sec.
2025-04-01 21:48:28 00557_array_resize: [ OK ] 1.45 sec.
2025-04-01 21:48:29 01061_window_view_event_hop_to_asc: [ OK ] 7.51 sec.
2025-04-01 21:48:29 01277_alter_rename_column_constraint_zookeeper_long: [ OK ] 3.55 sec.
2025-04-01 21:48:29 02828_create_as_table_function_rename: [ OK ] 1.04 sec.
2025-04-01 21:48:29 01085_window_view_attach: [ OK ] 1.52 sec.
2025-04-01 21:48:31 03252_fill_missed_arrays: [ OK ] 1.74 sec.
2025-04-01 21:48:31 03033_analyzer_query_parameters: [ OK ] 3.21 sec.
2025-04-01 21:48:32 02367_join_pushdown_column_not_found: [ OK ] 1.29 sec.
2025-04-01 21:48:33 00512_fractional_time_zones: [ OK ] 4.05 sec.
2025-04-01 21:48:33 00701_context_use_after_free: [ OK ] 0.99 sec.
2025-04-01 21:48:34 02810_convert_uuid_to_uint128: [ OK ] 2.85 sec.
2025-04-01 21:48:34 01913_if_int_decimal: [ OK ] 0.96 sec.
2025-04-01 21:48:34 02943_alter_user_modify_profiles_and_settings: [ OK ] 57.60 sec.
2025-04-01 21:48:35 01247_optimize_distributed_group_by_sharding_key_dist_on_dist: [ OK ] 2.25 sec.
2025-04-01 21:48:36 00036_array_element: [ OK ] 1.59 sec.
2025-04-01 21:48:37 00975_indices_mutation_replicated_zookeeper_long: [ OK ] 14.12 sec.
2025-04-01 21:48:37 00532_topk_generic: [ OK ] 1.04 sec.
2025-04-01 21:48:37 02012_get_server_port: [ OK ] 1.59 sec.
2025-04-01 21:48:38 01672_test_toSecond_mysql_dialect: [ OK ] 0.89 sec.
2025-04-01 21:48:39 02916_date_text_parsing: [ OK ] 2.20 sec.
2025-04-01 21:48:41 01754_cluster_all_replicas_shard_num: [ OK ] 1.89 sec.
2025-04-01 21:48:42 03023_group_by_use_nulls_analyzer_crashes: [ OK ] 3.75 sec.
2025-04-01 21:48:42 01679_incorrect_data_on_insert_collapsing: [ OK ] 5.05 sec.
2025-04-01 21:48:43 00642_cast: [ OK ] 1.30 sec.
2025-04-01 21:48:44 00576_nested_and_prewhere: [ OK ] 1.95 sec.
2025-04-01 21:48:45 02536_distributed_detach_table: [ OK ] 1.49 sec.
2025-04-01 21:48:45 01117_greatest_least_case: [ OK ] 0.94 sec.
2025-04-01 21:48:46 00702_where_with_quailified_names: [ OK ] 0.99 sec.
2025-04-01 21:48:46 03205_overlay: [ OK ] 4.90 sec.
2025-04-01 21:48:47 02416_json_object_inference: [ OK ] 1.34 sec.
2025-04-01 21:48:49 01510_format_regexp_raw_low_cardinality: [ OK ] 4.15 sec.
2025-04-01 21:48:51 01462_test_codec_on_alias: [ OK ] 1.99 sec.
2025-04-01 21:48:52 02887_byteswap: [ OK ] 4.60 sec.
2025-04-01 21:48:52 02506_date_time64_floating_point_negative_value: [ OK ] 1.09 sec.
2025-04-01 21:48:54 02918_wrong_dictionary_source: [ OK ] 1.59 sec.
2025-04-01 21:48:54 02458_relax_too_many_parts: [ OK ] 2.65 sec.
2025-04-01 21:48:55 02202_use_skip_indexes_if_final: [ OK ] 1.50 sec.
2025-04-01 21:48:57 03232_json_uniq_group_by: [ OK ] 2.15 sec.
2025-04-01 21:48:57 02677_get_subcolumn_array_of_tuples: [ OK ] 1.14 sec.
2025-04-01 21:48:57 02369_lost_part_intersecting_merges: [ OK ] 23.40 sec.
2025-04-01 21:48:58 00994_table_function_numbers_mt: [ OK ] 1.14 sec.
2025-04-01 21:48:58 00534_functions_bad_arguments6: [ SKIPPED ] 0.00 sec.
2025-04-01 21:48:58 Reason: not running for current build
2025-04-01 21:49:00 00498_array_functions_concat_slice_push_pop: [ OK ] 13.37 sec.
2025-04-01 21:49:00 00652_mergetree_mutations: [ OK ] 26.00 sec.
2025-04-01 21:49:01 01632_select_all_syntax: [ OK ] 1.55 sec.
2025-04-01 21:49:02 02588_parquet_bug: [ OK ] 4.20 sec.
2025-04-01 21:49:04 02346_additional_filters_index: [ OK ] 1.85 sec.
2025-04-01 21:49:06 02836_file_diagnostics_while_reading_header: [ OK ] 4.60 sec.
2025-04-01 21:49:06 01669_join_or_duplicates: [ OK ] 1.84 sec.
2025-04-01 21:49:06 02446_parent_zero_copy_locks: [ OK ] 6.66 sec.
2025-04-01 21:49:07 00548_slice_of_nested: [ OK ] 0.95 sec.
2025-04-01 21:49:07 01142_with_ties_and_aliases: [ OK ] 1.31 sec.
2025-04-01 21:49:09 00226_zookeeper_deduplication_and_unexpected_parts_long: [ OK ] 2.15 sec.
2025-04-01 21:49:10 01370_client_autocomplete_word_break_characters: [ OK ] 3.82 sec.
2025-04-01 21:49:11 02858_explicit_uuid_and_zk_path: [ OK ] 13.72 sec.
2025-04-01 21:49:12 01823_array_low_cardinality_KuliginStepan: [ OK ] 1.15 sec.
2025-04-01 21:49:13 00678_murmurhash: [ OK ] 2.01 sec.
2025-04-01 21:49:14 02943_use_full_text_skip_index_with_has_any: [ OK ] 2.20 sec.
2025-04-01 21:49:15 01561_mann_whitney_scipy: [ OK ] 5.91 sec.
2025-04-01 21:49:16 03146_tpc_ds_grouping: [ OK ] 1.64 sec.
2025-04-01 21:49:17 00575_merge_and_index_with_function_in_in: [ OK ] 1.50 sec.
2025-04-01 21:49:19 00500_point_in_polygon_nan: [ OK ] 1.40 sec.
2025-04-01 21:49:28 01293_show_clusters: [ OK ] 9.25 sec.
2025-04-01 21:49:31 03001_max_parallel_replicas_zero_value: [ OK ] 2.02 sec.
2025-04-01 21:49:34 02445_replicated_db_alter_partition: [ OK ] 64.85 sec.
2025-04-01 21:49:36 02470_mutation_sync_race: [ OK ] 39.33 sec.
2025-04-01 21:49:37 02293_grouping_function: [ OK ] 6.02 sec.
2025-04-01 21:49:38 01280_opencl_bitonic_order_by: [ OK ] 3.61 sec.
2025-04-01 21:49:40 02943_positional_arguments_bugs: [ OK ] 3.21 sec.
2025-04-01 21:49:41 01341_datetime64_wrong_supertype: [ OK ] 2.49 sec.
2025-04-01 21:49:42 00927_asof_joins: [ OK ] 4.62 sec.
2025-04-01 21:49:43 00830_join_overwrite: [ OK ] 3.18 sec.
2025-04-01 21:49:47 02015_shard_crash_clang_12_build: [ OK ] 40.29 sec.
2025-04-01 21:49:48 01493_alter_remove_properties_zookeeper: [ OK ] 7.18 sec.
2025-04-01 21:49:49 01042_h3_k_ring: [ OK ] 7.02 sec.
2025-04-01 21:49:49 00438_bit_rotate: [ OK ] 1.61 sec.
2025-04-01 21:49:50 01658_test_base64Encode_mysql_compatibility: [ OK ] 1.48 sec.
2025-04-01 21:49:51 03247_create-same-table-concurrently-with-atomic-engine: [ OK ] 35.21 sec.
2025-04-01 21:49:52 03006_join_on_inequal_expression_fast: [ OK ] 38.02 sec.
2025-04-01 21:49:53 00972_desc_table_virtual_columns: [ OK ] 1.51 sec.
2025-04-01 21:49:53 01883_with_grouping_sets: [ OK ] 3.47 sec.
2025-04-01 21:49:53 02105_backslash_letter_commands: [ OK ] 3.74 sec.
2025-04-01 21:49:53 02676_distinct_reading_in_order_analyzer: [ OK ] 1.42 sec.
2025-04-01 21:49:54 03090_analyzer_multiple_using_statements: [ OK ] 1.30 sec.
2025-04-01 21:49:54 03093_analyzer_column_alias: [ OK ] 1.05 sec.
2025-04-01 21:49:55 02384_decrypt_bad_arguments: [ OK ] 1.84 sec.
2025-04-01 21:49:56 00816_join_column_names_sarg: [ OK ] 1.24 sec.
2025-04-01 21:49:56 01052_compression_buffer_overrun: [ OK ] 2.79 sec.
2025-04-01 21:49:56 02212_cte_and_table_alias: [ OK ] 1.21 sec.
2025-04-01 21:49:56 02918_alter_temporary_table: [ OK ] 1.55 sec.
2025-04-01 21:49:57 03017_analyzer_groupby_fuzz_61600: [ OK ] 1.16 sec.
2025-04-01 21:49:57 02835_drop_user_during_session: [ OK ] 89.34 sec.
2025-04-01 21:49:58 02532_profileevents_server_startup_time: [ OK ] 0.94 sec.
2025-04-01 21:49:58 02864_statistics_usage: [ OK ] 2.42 sec.
2025-04-01 21:49:58 00751_hashing_ints: [ OK ] 1.26 sec.
2025-04-01 21:49:58 03004_json_named_tuples_inference_ambiguous_paths_as_string: [ OK ] 0.89 sec.
2025-04-01 21:49:59 01755_client_highlight_multi_line_comment_regression: [ OK ] 3.07 sec.
2025-04-01 21:50:00 01602_temporary_table_in_system_tables: [ OK ] 1.20 sec.
2025-04-01 21:50:00 01083_log_first_column_alias: [ OK ] 1.02 sec.
2025-04-01 21:50:00 02768_cse_nested_distributed: [ OK ] 2.46 sec.
2025-04-01 21:50:00 01016_index_tuple_field_type: [ OK ] 1.91 sec.
2025-04-01 21:50:01 01086_modulo_or_zero: [ OK ] 1.15 sec.
2025-04-01 21:50:01 03164_analyzer_rewrite_aggregate_function_with_if: [ OK ] 1.06 sec.
2025-04-01 21:50:02 01357_version_collapsing_attach_detach_zookeeper: [ OK ] 1.46 sec.
2025-04-01 21:50:03 02366_with_fill_date: [ OK ] 1.41 sec.
2025-04-01 21:50:03 00142_parse_timestamp_as_datetime: [ OK ] 3.46 sec.
2025-04-01 21:50:03 02841_with_clause_resolve: [ OK ] 5.05 sec.
2025-04-01 21:50:04 01030_concatenate_equal_fixed_strings: [ OK ] 0.99 sec.
2025-04-01 21:50:06 02888_system_tables_with_inaccessible_table_function: [ OK ] 3.00 sec.
2025-04-01 21:50:07 02006_test_positional_arguments: [ OK ] 5.03 sec.
2025-04-01 21:50:07 02467_set_with_lowcardinality_type: [ OK ] 1.45 sec.
2025-04-01 21:50:10 02841_valid_json_and_xml_on_http_exception: [ OK ] 100.49 sec.
2025-04-01 21:50:10 01753_fix_clickhouse_format: [ OK ] 3.55 sec.
2025-04-01 21:50:11 00719_format_datetime_rand: [ OK ] 6.71 sec.
2025-04-01 21:50:12 03243_compatibility_setting_with_alias: [ OK ] 1.45 sec.
2025-04-01 21:50:12 00381_first_significant_subdomain: [ OK ] 0.96 sec.
2025-04-01 21:50:14 02307_join_get_array_null: [ OK ] 1.30 sec.
2025-04-01 21:50:17 01940_pad_string: [ OK ] 3.58 sec.
2025-04-01 21:50:18 02103_with_names_and_types_parallel_parsing: [ OK ] 16.64 sec.
2025-04-01 21:50:19 01820_unhex_case_insensitive: [ OK ] 0.98 sec.
2025-04-01 21:50:19 01760_modulo_negative: [ OK ] 1.77 sec.
2025-04-01 21:50:20 01292_optimize_data_skip_idx_order_by_expr: [ OK ] 1.15 sec.
2025-04-01 21:50:24 00965_shard_unresolvable_addresses: [ OK ] 34.18 sec.
2025-04-01 21:50:25 01686_event_time_microseconds_part_log: [ OK ] 17.69 sec.
2025-04-01 21:50:26 02845_join_on_cond_sparse: [ OK ] 1.72 sec.
2025-04-01 21:50:27 00481_reading_from_last_granula: [ OK ] 1.42 sec.
2025-04-01 21:50:28 01256_negative_generate_random: [ OK ] 2.65 sec.
2025-04-01 21:50:29 00712_prewhere_with_alias_and_virtual_column: [ OK ] 1.20 sec.
2025-04-01 21:50:30 01019_Buffer_and_max_memory_usage: [ OK ] 10.71 sec.
2025-04-01 21:50:30 00049_any_left_join: [ OK ] 1.05 sec.
2025-04-01 21:50:31 03231_prewhere_conditions_order: [ OK ] 0.99 sec.
2025-04-01 21:50:32 02994_libarchive_compression: [ OK ] 19.56 sec.
2025-04-01 21:50:33 02364_setting_cross_to_inner_rewrite: [ OK ] 2.21 sec.
2025-04-01 21:50:33 00056_join_number_string: [ OK ] 1.04 sec.
2025-04-01 21:50:34 01943_pmj_non_joined_stuck: [ OK ] 2.95 sec.
2025-04-01 21:50:34 00436_fixed_string_16_comparisons: [ OK ] 1.85 sec.
2025-04-01 21:50:35 02371_select_projection_normal_agg: [ OK ] 25.51 sec.
2025-04-01 21:50:35 03015_analyzer_groupby_fuzz_60772: [ OK ] 1.19 sec.
2025-04-01 21:50:35 00910_zookeeper_custom_compression_codecs_replicated_long: [ OK ] 7.97 sec.
2025-04-01 21:50:36 00525_aggregate_functions_of_nullable_that_return_non_nullable: [ OK ] 0.95 sec.
2025-04-01 21:50:36 02435_rollback_cancelled_queries: [ OK ] 32.59 sec.
2025-04-01 21:50:36 01666_great_circle_distance_ubsan: [ OK ] 0.95 sec.
2025-04-01 21:50:36 01655_window_functions_null: [ OK ] 0.95 sec.
2025-04-01 21:50:37 02944_variant_as_common_type: [ FAIL ] 3.86 sec.
2025-04-01 21:50:37 Reason: result differs with reference:
2025-04-01 21:50:37 --- /repo/tests/queries/0_stateless/02944_variant_as_common_type.reference 2025-04-01 21:41:49.918105468 +0000
2025-04-01 21:50:37 +++ /repo/tests/queries/0_stateless/02944_variant_as_common_type.stdout 2025-04-01 21:50:37.048927068 +0000
2025-04-01 21:50:37 @@ -1,51 +1,51 @@
2025-04-01 21:50:37 -Array(UInt8) [1,2,3]
2025-04-01 21:50:37 -Array(UInt8) [1,2,3]
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 -Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 -Array(UInt8) [1,2,3]
2025-04-01 21:50:37 -Array(UInt8) [1,2,3]
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 -Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 -Array(UInt8) [1,2,3]
2025-04-01 21:50:37 -Array(UInt8) [1,2,3]
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 -Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 -String str_0
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -String str_2
2025-04-01 21:50:37 -String str_3
2025-04-01 21:50:37 -Nullable(String) str_0
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -Nullable(String) str_2
2025-04-01 21:50:37 -Nullable(String) str_3
2025-04-01 21:50:37 -Array(UInt64) [0]
2025-04-01 21:50:37 -Array(UInt64) [0,1]
2025-04-01 21:50:37 -Array(UInt64) [0,1,2]
2025-04-01 21:50:37 -Array(UInt64) [0,1,2,3]
2025-04-01 21:50:37 -Array(UInt64) [0]
2025-04-01 21:50:37 -Array(UInt64) [0,1]
2025-04-01 21:50:37 -Array(UInt64) [0,1,2]
2025-04-01 21:50:37 -Array(UInt64) [0,1,2,3]
2025-04-01 21:50:37 -String str_0
2025-04-01 21:50:37 -String str_1
2025-04-01 21:50:37 -String str_2
2025-04-01 21:50:37 -String str_3
2025-04-01 21:50:37 -Nullable(String) str_0
2025-04-01 21:50:37 -Nullable(String) str_1
2025-04-01 21:50:37 -Nullable(String) str_2
2025-04-01 21:50:37 -Nullable(String) str_3
2025-04-01 21:50:37 +Variant(Array(UInt8), String) [1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt8), String) [1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) [1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt8), String) [1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) [1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt8), String) [1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt8), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_0
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_2
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_3
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_0
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_2
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_3
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0,1]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0,1,2]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0,1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0,1]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0,1,2]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) [0,1,2,3]
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_0
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_2
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_3
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_0
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_1
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_2
2025-04-01 21:50:37 +Variant(Array(UInt64), String) str_3
2025-04-01 21:50:37 Variant(Array(UInt64), String) str_0
2025-04-01 21:50:37 Variant(Array(UInt64), String) str_1
2025-04-01 21:50:37 Variant(Array(UInt64), String) str_2
2025-04-01 21:50:37
2025-04-01 21:50:37
2025-04-01 21:50:37 Settings used in the test: --max_insert_threads 3 --group_by_two_level_threshold 1 --group_by_two_level_threshold_bytes 43964857 --distributed_aggregation_memory_efficient 0 --fsync_metadata 0 --output_format_parallel_formatting 1 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 7765296 --max_read_buffer_size 556013 --prefer_localhost_replica 0 --max_block_size 13304 --max_joined_block_size_rows 74116 --max_threads 2 --optimize_append_index 1 --optimize_if_chain_to_multiif 1 --optimize_if_transform_strings_to_enum 0 --optimize_read_in_order 1 --optimize_or_like_chain 0 --optimize_substitute_columns 1 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 63 --optimize_aggregation_in_order 0 --aggregation_in_order_max_block_bytes 7168480 --use_uncompressed_cache 1 --min_bytes_to_use_direct_io 10737418240 --min_bytes_to_use_mmap_io 1 --local_filesystem_read_method pread_threadpool --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 0 --filesystem_cache_segments_batch_size 5 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 --throw_on_error_from_cache_on_write_operations 0 --remote_filesystem_read_prefetch 0 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 64Mi --filesystem_prefetches_limit 10 --filesystem_prefetch_min_bytes_for_single_read_task 1Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 1 --compile_sort_description 1 --merge_tree_coarse_index_granularity 16 --optimize_distinct_in_order 0 --max_bytes_before_external_sort 7235980109 --max_bytes_before_external_group_by 7485155399 --max_bytes_before_remerge_sort 2500604641 --min_compress_block_size 1538155 --max_compress_block_size 2859653 --merge_tree_compact_parts_min_granules_to_multibuffer_read 128 --optimize_sorting_by_input_stream_properties 0 --http_response_buffer_size 7807956 --http_wait_end_of_query False --enable_memory_bound_merging_of_aggregation_results 0 --min_count_to_compile_expression 0 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 3 --session_timezone Mexico/BajaSur --use_page_cache_for_disks_without_file_cache True --page_cache_inject_eviction False --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.55 --prefer_external_sort_block_bytes 100000000 --cross_join_min_rows_to_compress 100000000 --cross_join_min_bytes_to_compress 1 --min_external_table_block_size_bytes 0 --max_parsing_threads 1 --optimize_functions_to_subcolumns 1 --parallel_replicas_local_plan 1 --query_plan_join_swap_table false --output_format_native_write_json_as_string 1 --enable_vertical_final 0
2025-04-01 21:50:37
2025-04-01 21:50:37 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 0.0 --prefer_fetch_merged_part_size_threshold 10737418240 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 1 --allow_vertical_merges_from_compact_to_wide_parts 0 --min_merge_bytes_to_use_direct_io 3765480514 --index_granularity_bytes 18561513 --merge_max_block_size 5560 --index_granularity 28407 --min_bytes_for_wide_part 258693546 --marks_compress_block_size 55772 --primary_key_compress_block_size 53525 --replace_long_file_name_to_hash 0 --max_file_name_length 128 --min_bytes_for_full_part_storage 536870912 --compact_parts_max_bytes_to_buffer 70655359 --compact_parts_max_granules_to_buffer 256 --compact_parts_merge_max_bytes_to_prefetch_part 14939658 --cache_populated_by_fetch 1 --concurrent_part_removal_threshold 39 --old_parts_lifetime 133 --prewarm_mark_cache 0 --use_const_adaptive_granularity 0 --enable_index_granularity_compression 0 --use_primary_key_cache 0 --prewarm_primary_key_cache 1
2025-04-01 21:50:37
2025-04-01 21:50:37 Database: test_x8cagg5f
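The diff above is uniform: everywhere the reference expects a concrete type (Array(UInt8), String, Nullable(String), Array(UInt64)), the server returned Variant(...) instead, i.e. the common type of the conditional branches was promoted to Variant even in the cases where the reference expects the plain type. A hedged sketch of the kind of expression involved, assuming the `allow_experimental_variant_type` and `use_variant_as_common_type` settings that the test name points at (this is not the actual 02944 test file):

    -- Hypothetical sketch, not the real test.
    -- With a constant condition the reference above expects the concrete branch
    -- type (Array(UInt8) here); the stdout shows Variant(Array(UInt8), String)
    -- being returned for such rows as well.
    SET allow_experimental_variant_type = 1, use_variant_as_common_type = 1;
    SELECT toTypeName(res), res
    FROM (SELECT if(1, [1, 2, 3]::Array(UInt8), 'str_1') AS res);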
2025-04-01 21:50:37 01909_mbtolou: [ OK ] 1.71 sec.
2025-04-01 21:50:38 02418_do_not_return_empty_blocks_from_ConvertingAggregatedToChunksTransform: [ OK ] 2.10 sec.
2025-04-01 21:50:38 01420_logical_functions_materialized_null: [ OK ] 1.11 sec.
2025-04-01 21:50:39 01179_insert_values_semicolon: [ OK ] 2.95 sec.
2025-04-01 21:50:39 02112_parse_date_yyyymmdd: [ OK ] 2.55 sec.
2025-04-01 21:50:39 02405_pmj_issue_40335: [ OK ] 1.20 sec.
2025-04-01 21:50:39 00442_filter_by_nullable: [ OK ] 1.60 sec.
2025-04-01 21:50:40 02352_lightweight_delete: [ OK ] 19.33 sec.
2025-04-01 21:50:40 01051_all_join_engine: [ OK ] 3.75 sec.
2025-04-01 21:50:40 03215_analyzer_replace_with_dummy_tables: [ OK ] 1.37 sec.
2025-04-01 21:50:41 03161_create_table_as_mv: [ OK ] 1.55 sec.
2025-04-01 21:50:41 01511_format_readable_timedelta: [ OK ] 1.15 sec.
2025-04-01 21:50:41 00166_functions_of_aggregation_states: [ OK ] 1.10 sec.
2025-04-01 21:50:42 01665_merge_tree_min_for_concurrent_read: [ OK ] 1.15 sec.
2025-04-01 21:50:43 02699_polygons_sym_difference_total_analyzer: [ OK ] 1.05 sec.
2025-04-01 21:50:44 00704_drop_truncate_memory_table: [ OK ] 4.51 sec.
2025-04-01 21:50:44 02564_query_id_header: [ OK ] 3.90 sec.
2025-04-01 21:50:45 02456_BLAKE3_hash_function_test: [ OK ] 0.99 sec.
2025-04-01 21:50:47 03032_async_backup_restore: [ OK ] 7.87 sec.
2025-04-01 21:50:48 02998_system_dns_cache_table: [ OK ] 0.95 sec.
2025-04-01 21:50:49 00977_join_use_nulls_denny_crane: [ OK ] 3.35 sec.
2025-04-01 21:50:51 00343_array_element_generic: [ OK ] 2.30 sec.
2025-04-01 21:50:53 00396_uuid_v7: [ OK ] 8.68 sec.
2025-04-01 21:50:55 00764_max_query_size_allocation: [ OK ] 2.12 sec.
2025-04-01 21:50:55 02234_cast_to_ip_address: [ OK ] 17.69 sec.
2025-04-01 21:50:55 00926_adaptive_index_granularity_replacing_merge_tree: [ OK ] 7.15 sec.
2025-04-01 21:50:57 02919_segfault_nullable_materialized_update: [ OK ] 1.80 sec.
2025-04-01 21:50:57 01086_odbc_roundtrip: [ SKIPPED ] 0.00 sec.
2025-04-01 21:50:57 Reason: not running for current build
2025-04-01 21:50:58 00005_shard_format_ast_and_remote_table_lambda: [ OK ] 1.27 sec.
2025-04-01 21:51:02 00755_avg_value_size_hint_passing: [ OK ] 7.02 sec.
2025-04-01 21:51:02 01037_polygon_dicts_correctness_all: [ OK ] 20.22 sec.
2025-04-01 21:51:03 02494_combinators_with_null_argument: [ OK ] 1.32 sec.
2025-04-01 21:51:04 01085_regexp_input_format: [ OK ] 8.56 sec.
2025-04-01 21:51:04 03214_parsing_archive_name_file: [ OK ] 23.57 sec.
2025-04-01 21:51:05 01431_finish_sorting_with_consts: [ OK ] 1.22 sec.
2025-04-01 21:51:07 00598_create_as_select_http: [ OK ] 4.16 sec.
2025-04-01 21:51:07 00955_test_final_mark_use: [ OK ] 8.34 sec.
2025-04-01 21:51:07 00953_moving_functions: [ OK ] 2.25 sec.
2025-04-01 21:51:08 02513_analyzer_sort_msan: [ OK ] 1.16 sec.
2025-04-01 21:51:10 02562_regexp_extract: [ OK ] 5.71 sec.
2025-04-01 21:51:10 01253_subquery_in_aggregate_function_JustStranger: [ OK ] 1.91 sec.
2025-04-01 21:51:12 02704_storage_merge_explain_graph_crash: [ OK ] 2.16 sec.
2025-04-01 21:51:13 02009_decimal_no_trailing_zeros: [ OK ] 2.48 sec.
2025-04-01 21:51:13 02495_analyzer_storage_join: [ OK ] 6.61 sec.
2025-04-01 21:51:14 01323_too_many_threads_bug: [ OK ] 9.66 sec.
2025-04-01 21:51:15 02983_empty_map_hasToken: [ OK ] 2.63 sec.
2025-04-01 21:51:15 02455_one_row_from_csv_memory_usage: [ OK ] 91.18 sec.
2025-04-01 21:51:15 02989_group_by_tuple: [ OK ] 0.96 sec.
2025-04-01 21:51:16 02677_analyzer_bitmap_has_any: [ OK ] 2.10 sec.
2025-04-01 21:51:16 00314_sample_factor_virtual_column: [ SKIPPED ] 0.00 sec.
2025-04-01 21:51:16 Reason: not running for current build
2025-04-01 21:51:16 03272_client_highlighting_bug: [ OK ] 2.77 sec.
2025-04-01 21:51:16 01670_sign_function: [ OK ] 1.65 sec.
2025-04-01 21:51:16 00760_url_functions_overflow: [ OK ] 1.22 sec.
2025-04-01 21:51:18 01126_month_partitioning_consistent_code: [ OK ] 1.10 sec.
2025-04-01 21:51:18 03279_with_clickhouse_driver: [ OK ] 0.40 sec.
2025-04-01 21:51:18 02294_nothing_arguments_in_functions: [ OK ] 2.36 sec.
2025-04-01 21:51:19 02250_insert_select_from_file_schema_inference: [ OK ] 1.20 sec.
2025-04-01 21:51:19 00757_enum_defaults: [ OK ] 2.97 sec.
2025-04-01 21:51:20 00965_logs_level_bugfix: [ OK ] 4.84 sec.
2025-04-01 21:51:20 00911_tautological_compare: [ OK ] 0.66 sec.
2025-04-01 21:51:20 02499_analyzer_aggregate_function_lambda_crash_fix: [ OK ] 1.95 sec.
2025-04-01 21:51:20 02206_information_schema_show_database: [ OK ] 0.95 sec.
2025-04-01 21:51:22 02681_undrop_query: [ OK ] 6.50 sec.
2025-04-01 21:51:22 01934_constexpr_aggregate_function_parameters: [ OK ] 2.61 sec.
2025-04-01 21:51:23 00411_long_accurate_number_comparison_float: [ OK ] 31.71 sec.
2025-04-01 21:51:23 02955_avro_format_zstd_encode_support: [ OK ] 1.10 sec.
2025-04-01 21:51:24 02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix: [ OK ] 1.51 sec.
2025-04-01 21:51:27 01548_parallel_parsing_max_memory: [ OK ] 6.82 sec.
2025-04-01 21:51:28 01661_extract_all_groups_throw_fast: [ OK ] 7.49 sec.
2025-04-01 21:51:30 03168_inconsistent_ast_formatting: [ OK ] 5.02 sec.
2025-04-01 21:51:30 01552_impl_aggfunc_cloneresize: [ OK ] 1.71 sec.
2025-04-01 21:51:30 01848_http_insert_segfault: [ OK ] 7.59 sec.
2025-04-01 21:51:31 02786_transform_float: [ OK ] 1.06 sec.
2025-04-01 21:51:31 00712_prewhere_with_missing_columns: [ OK ] 1.65 sec.
2025-04-01 21:51:32 02771_system_user_processes: [ OK ] 8.47 sec.
2025-04-01 21:51:32 00500_point_in_polygon_bug_2: [ OK ] 0.91 sec.
2025-04-01 21:51:33 00553_invalid_nested_name: [ OK ] 0.86 sec.
2025-04-01 21:51:33 03042_analyzer_alias_join: [ OK ] 1.27 sec.
2025-04-01 21:51:33 01710_aggregate_projection_with_normalized_states: [ OK ] 2.14 sec.
2025-04-01 21:51:33 02935_date_trunc_case_unsensitiveness: [ OK ] 3.81 sec.
2025-04-01 21:51:34 02480_every_asynchronous_metric_must_have_documentation: [ OK ] 0.91 sec.
2025-04-01 21:51:34 00414_time_zones_direct_conversion: [ OK ] 1.01 sec.
2025-04-01 21:51:35 02122_parallel_formatting_CSVWithNamesAndTypes: [ OK ] 7.28 sec.
2025-04-01 21:51:35 02012_zookeeper_changed_enum_type_incompatible: [ OK ] 1.70 sec.
2025-04-01 21:51:36 02232_functions_to_subcolumns_alias: [ OK ] 1.26 sec.
2025-04-01 21:51:36 01479_cross_join_9855: [ OK ] 1.21 sec.
2025-04-01 21:51:37 02950_part_offset_as_primary_key: [ OK ] 3.00 sec.
2025-04-01 21:51:37 01803_untuple_subquery: [ OK ] 1.10 sec.
2025-04-01 21:51:37 02449_check_dependencies_and_table_shutdown: [ OK ] 1.95 sec.
2025-04-01 21:51:37 03211_convert_outer_join_to_inner_join_anti_join: [ OK ] 1.75 sec.
2025-04-01 21:51:38 00954_resample_combinator: [ OK ] 1.60 sec.
2025-04-01 21:51:38 03094_transform_return_first: [ OK ] 1.15 sec.
2025-04-01 21:51:40 01050_window_view_parser_tumble: [ OK ] 2.61 sec.
2025-04-01 21:51:40 02996_index_compaction_counterexample: [ OK ] 1.41 sec.
2025-04-01 21:51:41 01801_dateDiff_DateTime64: [ OK ] 3.65 sec.
2025-04-01 21:51:41 01506_ttl_same_with_order_by: [ OK ] 2.86 sec.
2025-04-01 21:51:43 02905_show_setting_query: [ OK ] 1.15 sec.
2025-04-01 21:51:44 01174_select_insert_isolation: [ OK ] 61.09 sec.
2025-04-01 21:51:44 00312_position_case_insensitive_utf8: [ OK ] 37.09 sec.
2025-04-01 21:51:44 02384_nullable_low_cardinality_as_dict_in_arrow: [ OK ] 1.25 sec.
2025-04-01 21:51:45 02900_limit_by_query_stage: [ OK ] 4.93 sec.
2025-04-01 21:51:45 02511_complex_literals_as_aggregate_function_parameters: [ OK ] 1.55 sec.
2025-04-01 21:51:45 02992_settings_overflow: [ OK ] 1.45 sec.
2025-04-01 21:51:46 00485_http_insert_format: [ OK ] 4.96 sec.
2025-04-01 21:51:46 00978_table_function_values_alias: [ OK ] 1.00 sec.
2025-04-01 21:51:47 03208_groupArrayIntersect_serialization: [ OK ] 3.05 sec.
2025-04-01 21:51:48 02497_analyzer_sum_if_count_if_pass_crash_fix: [ OK ] 1.00 sec.
2025-04-01 21:51:48 01833_test_collation_alvarotuso: [ OK ] 1.56 sec.
2025-04-01 21:51:49 02887_mutations_subcolumns: [ OK ] 3.96 sec.
2025-04-01 21:51:49 03013_fuzz_arrayPartialReverseSort: [ OK ] 1.21 sec.
2025-04-01 21:51:49 03033_from_unixtimestamp_joda_by_int64: [ OK ] 1.02 sec.
2025-04-01 21:51:49 02044_exists_operator: [ OK ] 1.72 sec.
2025-04-01 21:51:50 00283_column_cut: [ OK ] 1.05 sec.
2025-04-01 21:51:57 02889_file_log_save_errors: [ OK ] 7.68 sec.
2025-04-01 21:51:57 01074_window_view_event_tumble_asc_join_populate: [ OK ] 7.88 sec.
2025-04-01 21:51:58 00232_format_readable_size: [ OK ] 1.01 sec.
2025-04-01 21:51:59 01584_distributed_buffer_cannot_find_column: [ OK ] 1.96 sec.
2025-04-01 21:51:59 00534_filimonov: [ OK ] 19.18 sec.
2025-04-01 21:51:59 02662_sparse_columns_mutations_4: [ OK ] 1.45 sec.
2025-04-01 21:52:00 02448_clone_replica_lost_part: [ OK ] 39.32 sec.
2025-04-01 21:52:00 03036_dynamic_read_subcolumns_wide_merge_tree: [ SKIPPED ] 0.00 sec.
2025-04-01 21:52:00 Reason: not running for current build
2025-04-01 21:52:01 03089_analyzer_alias_replacement: [ OK ] 1.06 sec.
2025-04-01 21:52:01 01342_query_parameters_alias: [ OK ] 2.76 sec.
2025-04-01 21:52:02 03278_dateTime64_in_dateTime64_bug: [ OK ] 1.37 sec.
2025-04-01 21:52:02 01558_ttest: [ OK ] 3.08 sec.
2025-04-01 21:52:02 01552_alter_name_collision: [ OK ] 0.81 sec.
2025-04-01 21:52:03 01002_alter_nullable_adaptive_granularity_long: [ OK ] 13.89 sec.
2025-04-01 21:52:03 02580_like_substring_search_bug: [ OK ] 0.90 sec.
2025-04-01 21:52:03 02244_url_engine_headers_test: [ OK ] 3.61 sec.
2025-04-01 21:52:04 03095_group_by_server_constants_bug: [ OK ] 1.35 sec.
2025-04-01 21:52:05 02366_union_decimal_conversion: [ OK ] 1.25 sec.
2025-04-01 21:52:05 02662_sparse_columns_mutations_2: [ OK ] 2.16 sec.
2025-04-01 21:52:06 00702_join_with_using: [ OK ] 2.16 sec.
2025-04-01 21:52:06 03195_group_concat_deserialization_fix: [ OK ] 1.60 sec.
2025-04-01 21:52:07 02354_vector_search_unquoted_index_parameters: [ OK ] 1.47 sec.
2025-04-01 21:52:07 01568_window_functions_distributed: [ OK ] 3.82 sec.
2025-04-01 21:52:09 02811_read_in_order_and_array_join_bug: [ OK ] 1.45 sec.
2025-04-01 21:52:10 02122_parallel_formatting_PrettyCompact: [ OK ] 19.58 sec.
2025-04-01 21:52:10 01676_reinterpret_as: [ OK ] 3.65 sec.
2025-04-01 21:52:10 02294_anova_cmp: [ OK ] 5.16 sec.
2025-04-01 21:52:11 02450_kill_distributed_query_deadlock: [ OK ] 36.68 sec.
2025-04-01 21:52:12 01151_storage_merge_filter_tables_by_virtual_column: [ OK ] 2.36 sec.
2025-04-01 21:52:13 01674_filter_by_uint8: [ OK ] 1.55 sec.
2025-04-01 21:52:13 01825_new_type_json_btc: [ OK ] 11.33 sec.
2025-04-01 21:52:14 01905_to_json_string: [ OK ] 1.15 sec.
2025-04-01 21:52:16 03220_replace_formatting: [ OK ] 2.20 sec.
2025-04-01 21:52:16 01055_compact_parts_granularity: [ OK ] 8.52 sec.
2025-04-01 21:52:16 02863_non_const_timezone_check: [ OK ] 3.01 sec.
2025-04-01 21:52:17 02286_parallel_final: [ OK ] 31.34 sec.
2025-04-01 21:52:18 03198_json_extract_more_types: [ OK ] 1.91 sec.
2025-04-01 21:52:20 00398_url_functions: [ OK ] 9.48 sec.
2025-04-01 21:52:23 03232_pr_not_ready_set: [ OK ] 4.88 sec.
2025-04-01 21:52:25 03125_analyzer_CTE_two_joins: [ OK ] 1.37 sec.
2025-04-01 21:52:27 03001_insert_threads_deduplication: [ OK ] 2.37 sec.
2025-04-01 21:52:28 00543_access_to_temporary_table_in_readonly_mode: [ OK ] 11.55 sec.
2025-04-01 21:52:30 01350_intdiv_nontrivial_fpe: [ OK ] 2.78 sec.
2025-04-01 21:52:32 02158_explain_ast_alter_commands: [ OK ] 3.75 sec.
2025-04-01 21:52:32 02704_max_backup_bandwidth: [ OK ] 14.71 sec.
2025-04-01 21:52:32 01516_date_time_output_format: [ OK ] 1.75 sec.
2025-04-01 21:52:33 02286_function_wyhash: [ OK ] 1.15 sec.
2025-04-01 21:52:33 02966_float32_promotion: [ OK ] 1.05 sec.
2025-04-01 21:52:33 02690_subquery_identifiers: [ OK ] 1.40 sec.
2025-04-01 21:52:34 02534_keyed_siphash: [ OK ] 24.17 sec.
2025-04-01 21:52:35 00531_aggregate_over_nullable: [ OK ] 1.60 sec.
2025-04-01 21:52:35 00615_nullable_alter_optimize: [ OK ] 1.80 sec.
2025-04-01 21:52:36 01521_global_in_prewhere_15792: [ OK ] 1.81 sec.
2025-04-01 21:52:37 02884_interval_operator_support_plural_literal: [ OK ] 2.06 sec.
2025-04-01 21:52:38 01514_input_format_csv_enum_as_number_setting: [ OK ] 1.11 sec.
2025-04-01 21:52:38 02723_zookeeper_name: [ OK ] 2.02 sec.
2025-04-01 21:52:38 02283_array_norm: [ OK ] 4.66 sec.
2025-04-01 21:52:41 03228_join_to_rerange_right_table: [ OK ] 2.58 sec.
2025-04-01 21:52:41 00163_shard_join_with_empty_table: [ OK ] 3.28 sec.
2025-04-01 21:52:42 02313_dump_column_structure_low_cardinality: [ OK ] 0.97 sec.
2025-04-01 21:52:43 00881_unknown_identifier_in_in: [ OK ] 1.01 sec.
2025-04-01 21:52:44 00488_non_ascii_column_names: [ OK ] 1.11 sec.
2025-04-01 21:52:44 02345_implicit_transaction: [ OK ] 9.24 sec.
2025-04-01 21:52:44 01732_race_condition_storage_join_long: [ OK ] 24.07 sec.
2025-04-01 21:52:45 02041_conversion_between_date32_and_datetime64: [ OK ] 1.05 sec.
2025-04-01 21:52:45 00406_tuples_with_nulls: [ OK ] 1.07 sec.
2025-04-01 21:52:46 02356_trivial_count_with_empty_set: [ OK ] 1.15 sec.
2025-04-01 21:52:46 02752_custom_separated_ignore_spaces_bug: [ OK ] 1.07 sec.
2025-04-01 21:52:47 02661_read_from_archive_tarxz: [ OK ] 37.82 sec.
2025-04-01 21:52:48 01704_transform_with_float_key: [ OK ] 1.20 sec.
2025-04-01 21:52:48 03271_max_bytes_ratio_before_external_order_by: [ OK ] 1.25 sec.
2025-04-01 21:52:48 01455_time_zones: [ OK ] 1.00 sec.
2025-04-01 21:52:49 01016_simhash_minhash: [ OK ] 7.13 sec.
2025-04-01 21:52:49 00759_kodieg: [ OK ] 0.95 sec.
2025-04-01 21:52:49 02920_fix_json_merge_patch: [ OK ] 1.11 sec.
2025-04-01 21:52:49 00531_client_ignore_error: [ OK ] 4.72 sec.
2025-04-01 21:52:50 01114_alter_modify_compact_parts: [ OK ] 1.76 sec.
2025-04-01 21:52:50 01638_div_mod_ambiguities: [ OK ] 1.37 sec.
2025-04-01 21:52:50 00253_insert_recursive_defaults: [ OK ] 1.52 sec.
2025-04-01 21:52:51 02952_conjunction_optimization: [ OK ] 1.76 sec.
2025-04-01 21:52:51 03208_multiple_joins_with_storage_join: [ OK ] 2.31 sec.
2025-04-01 21:52:52 01319_manual_write_to_replicas_long: [ OK ] 2.06 sec.
2025-04-01 21:52:53 02706_arrow_different_dictionaries: [ OK ] 2.66 sec.
2025-04-01 21:52:54 03275_ignore_nonexistent_files_fix: [ OK ] 1.05 sec.
2025-04-01 21:52:54 02990_format_lambdas: [ OK ] 2.86 sec.
2025-04-01 21:52:55 01455_shard_leaf_max_rows_bytes_to_read: [ OK ] 5.39 sec.
2025-04-01 21:52:55 01560_mann_whitney: [ OK ] 2.08 sec.
2025-04-01 21:52:56 03010_file_log_large_poll_batch_size: [ OK ] 1.26 sec.
2025-04-01 21:52:58 02982_json_columns_with_metadata_http: [ OK ] 4.66 sec.
2025-04-01 21:52:59 01157_replace_table: [ OK ] 3.84 sec.
2025-04-01 21:53:00 00035_function_array_return_type: [ OK ] 1.11 sec.
2025-04-01 21:53:00 02354_vector_search_multiple_indexes: [ OK ] 1.25 sec.
2025-04-01 21:53:01 03172_system_detached_tables: [ OK ] 5.11 sec.
2025-04-01 21:53:01 03033_virtual_column_override: [ OK ] 1.10 sec.
2025-04-01 21:53:02 02012_low_cardinality_uuid_with_extremes: [ OK ] 1.17 sec.
2025-04-01 21:53:03 00237_group_by_arrays: [ OK ] 2.34 sec.
2025-04-01 21:53:03 00741_client_comment_multiline: [ OK ] 0.79 sec.
2025-04-01 21:53:04 01375_storage_file_write_prefix_tsv_with_names: [ OK ] 1.47 sec.
2025-04-01 21:53:04 02226_in_untuple_issue_34810: [ OK ] 1.32 sec.
2025-04-01 21:53:06 00801_daylight_saving_time_hour_underflow: [ OK ] 1.16 sec.
2025-04-01 21:53:06 01019_parallel_parsing_cancel: [ OK ] 28.05 sec.
2025-04-01 21:53:08 00399_group_uniq_array_date_datetime: [ OK ] 1.45 sec.
2025-04-01 21:53:09 00573_shard_aggregation_by_empty_set: [ OK ] 3.08 sec.
2025-04-01 21:53:09 02704_keeper_map_zk_nodes: [ OK ] 18.77 sec.
2025-04-01 21:53:11 02370_extractAll_regress: [ OK ] 1.01 sec.
2025-04-01 21:53:11 00282_merging: [ OK ] 6.58 sec.
2025-04-01 21:53:12 00837_minmax_index_replicated_zookeeper_long: [ OK ] 4.47 sec.
2025-04-01 21:53:13 01506_buffer_table_alter_block_structure_2: [ OK ] 1.32 sec.
2025-04-01 21:53:13 03227_print_pretty_tuples_create_query: [ SKIPPED ] 0.00 sec.
2025-04-01 21:53:13 Reason: not running for current build
2025-04-01 21:53:15 02933_paste_join: [ OK ] 6.36 sec.
2025-04-01 21:53:16 02765_queries_with_subqueries_profile_events: [ OK ] 61.94 sec.
2025-04-01 21:53:16 02899_use_default_format_on_http_exception: [ OK ] 4.07 sec.
2025-04-01 21:53:16 02001_hostname_test: [ OK ] 1.17 sec.
2025-04-01 21:53:17 01079_reinterpret_as_fixed_string: [ OK ] 0.90 sec.
2025-04-01 21:53:17 03251_parquet_page_v2_native_reader: [ OK ] 4.11 sec.
2025-04-01 21:53:17 02901_parallel_replicas_rollup: [ OK ] 21.18 sec.
2025-04-01 21:53:17 01054_cache_dictionary_bunch_update: [ OK ] 16.63 sec.
2025-04-01 21:53:18 02366_kql_func_binary: [ OK ] 1.10 sec.
2025-04-01 21:53:18 02169_fix_view_offset_limit_setting: [ OK ] 1.35 sec.
2025-04-01 21:53:18 00233_position_function_sql_comparibilty: [ OK ] 1.55 sec.
2025-04-01 21:53:19 01112_check_table_with_index: [ OK ] 1.11 sec.
2025-04-01 21:53:19 02366_kql_func_scalar: [ OK ] 1.95 sec.
2025-04-01 21:53:19 02151_clickhouse_client_hints: [ OK ] 2.16 sec.
2025-04-01 21:53:19 01915_for_each_crakjie: [ OK ] 1.10 sec.
2025-04-01 21:53:20 01566_negate_formatting: [ OK ] 1.16 sec.
2025-04-01 21:53:20 01277_convert_field_to_type_logical_error: [ OK ] 1.56 sec.
2025-04-01 21:53:22 01456_min_negative_decimal_formatting: [ OK ] 1.22 sec.
2025-04-01 21:53:22 03213_deep_json: [ OK ] 3.52 sec.
2025-04-01 21:53:23 01710_aggregate_projection_with_grouping_set: [ OK ] 3.97 sec.
2025-04-01 21:53:24 02891_empty_tuple: [ OK ] 4.41 sec.
2025-04-01 21:53:25 03035_materialized_primary_key: [ OK ] 3.42 sec.
2025-04-01 21:53:25 01683_codec_encrypted: [ OK ] 3.08 sec.
2025-04-01 21:53:34 01656_join_defaul_enum: [ OK ] 8.10 sec.
2025-04-01 21:53:39 01008_materialized_view_henyihanwobushi: [ OK ] 13.30 sec.
2025-04-01 21:53:47 01019_alter_materialized_view_consistent: [ OK ] 22.61 sec.
2025-04-01 21:53:50 02730_dictionary_hashed_load_factor_element_count: [ OK ] 24.78 sec.
2025-04-01 21:53:52 03000_minmax_index_first: [ OK ] 4.39 sec.
2025-04-01 21:53:55 01624_soft_constraints: [ FAIL ] 43.61 sec.
2025-04-01 21:53:55 Reason: result differs with reference:
2025-04-01 21:53:55 --- /repo/tests/queries/0_stateless/01624_soft_constraints.reference 2025-04-01 21:41:49.741983371 +0000
2025-04-01 21:53:55 +++ /repo/tests/queries/0_stateless/01624_soft_constraints.stdout 2025-04-01 21:53:54.823238690 +0000
2025-04-01 21:53:55 @@ -1,16 +1,16 @@
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 - "rows_read": 2,
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 - "rows_read": 2,
2025-04-01 21:53:55 - "rows_read": 2,
2025-04-01 21:53:55 - "rows_read": 2,
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 - "rows_read": 2,
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 - "rows_read": 1,
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 - "rows_read": 3,
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 "rows_read": 4,
2025-04-01 21:53:55 - "rows_read": 3,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55 + "rows_read": 4,
2025-04-01 21:53:55
2025-04-01 21:53:55
2025-04-01 21:53:55 Settings used in the test: --max_insert_threads 2 --group_by_two_level_threshold 783246 --group_by_two_level_threshold_bytes 50000000 --distributed_aggregation_memory_efficient 0 --fsync_metadata 0 --output_format_parallel_formatting 0 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 8171523 --max_read_buffer_size 853119 --prefer_localhost_replica 0 --max_block_size 77271 --max_joined_block_size_rows 52341 --max_threads 3 --optimize_append_index 0 --optimize_if_chain_to_multiif 0 --optimize_if_transform_strings_to_enum 1 --optimize_read_in_order 0 --optimize_or_like_chain 0 --optimize_substitute_columns 1 --enable_multiple_prewhere_read_steps 0 --read_in_order_two_level_merge_threshold 11 --optimize_aggregation_in_order 0 --aggregation_in_order_max_block_bytes 32473917 --use_uncompressed_cache 0 --min_bytes_to_use_direct_io 10737418240 --min_bytes_to_use_mmap_io 2023004507 --local_filesystem_read_method mmap --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 0 --filesystem_cache_segments_batch_size 100 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 --throw_on_error_from_cache_on_write_operations 0 --remote_filesystem_read_prefetch 0 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 32Mi --filesystem_prefetches_limit 10 --filesystem_prefetch_min_bytes_for_single_read_task 1Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 1 --compile_sort_description 1 --merge_tree_coarse_index_granularity 2 --optimize_distinct_in_order 1 --max_bytes_before_external_sort 5994329488 --max_bytes_before_external_group_by 0 --max_bytes_before_remerge_sort 2781895526 --min_compress_block_size 1713594 --max_compress_block_size 573802 --merge_tree_compact_parts_min_granules_to_multibuffer_read 9 --optimize_sorting_by_input_stream_properties 1 --http_response_buffer_size 6163976 --http_wait_end_of_query True --enable_memory_bound_merging_of_aggregation_results 0 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 3 --min_count_to_compile_sort_description 3 --session_timezone America/Mazatlan --use_page_cache_for_disks_without_file_cache False --page_cache_inject_eviction True --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.67 --prefer_external_sort_block_bytes 1 --cross_join_min_rows_to_compress 1 --cross_join_min_bytes_to_compress 0 --min_external_table_block_size_bytes 0 --max_parsing_threads 10 --optimize_functions_to_subcolumns 0 --parallel_replicas_local_plan 0 --query_plan_join_swap_table auto --output_format_native_write_json_as_string 0 --enable_vertical_final 1
2025-04-01 21:53:55
2025-04-01 21:53:55 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 1.0 --prefer_fetch_merged_part_size_threshold 10737418240 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 100 --allow_vertical_merges_from_compact_to_wide_parts 1 --min_merge_bytes_to_use_direct_io 10063658972 --index_granularity_bytes 28724770 --merge_max_block_size 8140 --index_granularity 14211 --min_bytes_for_wide_part 267871795 --marks_compress_block_size 18394 --primary_key_compress_block_size 72449 --replace_long_file_name_to_hash 1 --max_file_name_length 0 --min_bytes_for_full_part_storage 488464614 --compact_parts_max_bytes_to_buffer 494445199 --compact_parts_max_granules_to_buffer 256 --compact_parts_merge_max_bytes_to_prefetch_part 25984456 --cache_populated_by_fetch 1 --concurrent_part_removal_threshold 0 --old_parts_lifetime 142 --prewarm_mark_cache 0 --use_const_adaptive_granularity 0 --enable_index_granularity_compression 0 --use_primary_key_cache 1 --prewarm_primary_key_cache 1
2025-04-01 21:53:55
2025-04-01 21:53:55 Database: test_0jxzhmiq
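The expected values in this diff (rows_read of 1, 2 or 3) come from queries that can only skip granules if the optimizer combines the table's ASSUME (hypothesis) constraints with an index; the stdout shows every such query falling back to a full read of 4 rows. A hedged sketch of the mechanism, not the actual 01624 test file (table, index and constraint names below are made up; the settings `optimize_using_constraints`, `optimize_substitute_columns` and `optimize_append_index` are the ones listed in the settings dump above):

    -- Hypothetical sketch of a hypothesis-constraint check.
    -- The ASSUME constraint tells the optimizer that a = b, so a predicate on b
    -- can also be checked against the minmax index on a; when that works,
    -- "rows_read" in the FORMAT JSON statistics drops below the table size.
    CREATE TABLE hypothesis_demo
    (
        i UInt64,
        a UInt64,
        b UInt64,
        INDEX ix_a a TYPE minmax GRANULARITY 1,
        CONSTRAINT c_ab ASSUME a = b
    )
    ENGINE = MergeTree ORDER BY i SETTINGS index_granularity = 2;

    INSERT INTO hypothesis_demo SELECT number, number, number FROM numbers(4);

    SET optimize_using_constraints = 1, optimize_substitute_columns = 1, optimize_append_index = 1;
    -- If the constraint is used together with the index, "rows_read" drops to 2;
    -- otherwise all 4 rows are read, as in the failing output above.
    SELECT count() FROM hypothesis_demo WHERE b = 3 FORMAT JSON;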
2025-04-01 21:53:56 02893_vertical_final_bugs: [ OK ] 5.35 sec.
2025-04-01 21:53:59 00394_replaceall_vector_fixed: [ OK ] 2.60 sec.
2025-04-01 21:53:59 01683_text_log_deadlock: [ OK ] 41.30 sec.
2025-04-01 21:54:01 01254_dict_load_after_detach_attach: [ OK ] 1.84 sec.
2025-04-01 21:54:02 02375_stack_trace_no_addresses: [ OK ] 7.28 sec.
2025-04-01 21:54:02 02841_check_table_progress: [ OK ] 22.50 sec.
2025-04-01 21:54:04 01732_bigint_ubsan: [ OK ] 2.56 sec.
2025-04-01 21:54:05 01400_join_get_with_multi_keys: [ OK ] 1.25 sec.
2025-04-01 21:54:06 01021_tuple_parser: [ OK ] 1.15 sec.
2025-04-01 21:54:08 02377_executable_function_settings: [ OK ] 2.25 sec.
2025-04-01 21:54:10 03164_materialize_skip_index: [ OK ] 18.02 sec.
2025-04-01 21:54:12 02498_analyzer_settings_push_down: [ OK ] 1.81 sec.
2025-04-01 21:54:12 02156_async_insert_query_log: [ OK ] 37.85 sec.
2025-04-01 21:54:13 01910_client_replxx_container_overflow_long: [ OK ] 4.06 sec.
2025-04-01 21:54:14 00547_named_tuples: [ OK ] 0.90 sec.
2025-04-01 21:54:14 03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug: [ SKIPPED ] 0.00 sec.
2025-04-01 21:54:14 Reason: not running for current build
2025-04-01 21:54:16 02898_parallel_replicas_progress_bar: [ OK ] 13.59 sec.
2025-04-01 21:54:16 02242_join_rocksdb: [ OK ] 4.09 sec.
2025-04-01 21:54:16 02116_interactive_hello: [ OK ] 2.66 sec.
2025-04-01 21:54:17 01560_ttl_remove_empty_parts: [ OK ] 14.18 sec.
2025-04-01 21:54:19 02231_bloom_filter_sizing: [ OK ] 2.56 sec.
2025-04-01 21:54:20 02763_mutate_compact_part_with_skip_indices_and_projections: [ OK ] 2.76 sec.
2025-04-01 21:54:21 02377_fix_file_virtual_column: [ OK ] 1.78 sec.
2025-04-01 21:54:23 00999_full_join_dup_keys_crash: [ OK ] 6.07 sec.
2025-04-01 21:54:28 03164_selects_with_pk_usage_profile_event: [ OK ] 29.14 sec.
2025-04-01 21:54:30 02718_array_fold: [ OK ] 8.84 sec.
2025-04-01 21:54:30 02226_async_insert_table_function: [ OK ] 1.51 sec.
2025-04-01 21:54:31 00645_date_time_input_format: [ OK ] 1.31 sec.
2025-04-01 21:54:31 01716_array_difference_overflow: [ OK ] 1.42 sec.
2025-04-01 21:54:32 01010_low_cardinality_and_native_http: [ OK ] 12.59 sec.
2025-04-01 21:54:33 02888_single_state_nullable_type: [ OK ] 1.15 sec.
2025-04-01 21:54:33 01291_geo_types: [ OK ] 1.40 sec.
2025-04-01 21:54:35 00424_shard_aggregate_functions_of_nullable: [ OK ] 2.50 sec.
2025-04-01 21:54:35 00941_system_columns_race_condition: [ OK ] 19.38 sec.
2025-04-01 21:54:37 02985_shard_query_start_time: [ OK ] 4.23 sec.
2025-04-01 21:54:38 02239_bzip2_bug: [ OK ] 14.92 sec.
2025-04-01 21:54:38 03217_json_merge_patch_stack_overflow: [ OK ] 5.13 sec.
2025-04-01 21:54:39 00015_totals_having_constants: [ OK ] 0.96 sec.
2025-04-01 21:54:42 02932_apply_deleted_mask: [ OK ] 4.66 sec.
2025-04-01 21:54:43 02868_distinct_to_count_optimization: [ OK ] 4.71 sec.
2025-04-01 21:54:43 01611_constant_folding_subqueries: [ OK ] 1.52 sec.
2025-04-01 21:54:45 00068_empty_tiny_log: [ OK ] 1.02 sec.
2025-04-01 21:54:48 00623_truncate_all_tables: [ OK ] 3.02 sec.
2025-04-01 21:54:49 02573_quantile_fuse_msan: [ OK ] 1.11 sec.
2025-04-01 21:54:50 00331_final_and_prewhere_condition_ver_column: [ OK ] 1.31 sec.
2025-04-01 21:54:52 02377_optimize_sorting_by_input_stream_properties_explain: [ OK ] 40.25 sec.
2025-04-01 21:54:54 01000_bad_size_of_marks_skip_idx: [ OK ] 3.97 sec.
2025-04-01 21:54:56 00838_unique_index: [ OK ] 12.94 sec.
2025-04-01 21:54:58 00907_set_index_max_rows: [ OK ] 5.89 sec.
2025-04-01 21:54:58 01213_alter_rename_column: [ OK ] 3.87 sec.
2025-04-01 21:54:59 00342_escape_sequences: [ OK ] 1.00 sec.
2025-04-01 21:55:01 02722_line_as_string_consistency: [ OK ] 5.02 sec.
2025-04-01 21:55:02 01076_array_join_prewhere_const_folding: [ OK ] 3.78 sec.
2025-04-01 21:55:03 03157_negative_positional_arguments_ubsan: [ OK ] 1.40 sec.
2025-04-01 21:55:03 02354_tuple_lowcardinality: [ OK ] 1.23 sec.
2025-04-01 21:55:03 01913_quantile_deterministic: [ OK ] 28.04 sec.
2025-04-01 21:55:04 01661_arraySlice_ubsan: [ OK ] 1.06 sec.
2025-04-01 21:55:04 02101_avro_union_index_out_of_boundary: [ OK ] 4.51 sec.
2025-04-01 21:55:05 02479_analyzer_aggregation_totals_rollup_crash_fix: [ OK ] 1.11 sec.
2025-04-01 21:55:05 02963_invalid_identifier: [ OK ] 1.50 sec.
2025-04-01 21:55:06 03273_select_from_explain_ast_non_select: [ OK ] 1.02 sec.
2025-04-01 21:55:07 01642_if_nullable_regression: [ OK ] 2.41 sec.
2025-04-01 21:55:07 01854_s2_cap_contains: [ OK ] 3.61 sec.
2025-04-01 21:55:08 02952_binary: [ OK ] 1.80 sec.
2025-04-01 21:55:08 02834_nulls_first_sort: [ OK ] 1.11 sec.
2025-04-01 21:55:08 01070_h3_indexes_are_neighbors: [ OK ] 1.05 sec.
2025-04-01 21:55:10 00206_empty_array_to_single: [ OK ] 1.36 sec.
2025-04-01 21:55:10 00912_string_comparison: [ OK ] 2.60 sec.
2025-04-01 21:55:12 00397_tsv_format_synonym: [ OK ] 1.82 sec.
2025-04-01 21:55:13 02099_hashed_array_dictionary_complex_key: [ OK ] 34.47 sec.
2025-04-01 21:55:15 00974_text_log_table_not_empty: [ OK ] 6.74 sec.
2025-04-01 21:55:15 02982_minmax_nan_null_order: [ OK ] 3.06 sec.
2025-04-01 21:55:16 02998_analyzer_prewhere_report: [ OK ] 1.06 sec.
2025-04-01 21:55:16 00718_low_cardinaliry_alter: [ OK ] 2.71 sec.
2025-04-01 21:55:17 00044_sorting_by_string_descending: [ OK ] 1.00 sec.
2025-04-01 21:55:17 00323_quantiles_timing_bug: [ OK ] 2.76 sec.
2025-04-01 21:55:28 02459_group_by_all: [ FAIL ] 0.85 sec.
2025-04-01 21:55:28 Reason: return code: 210
2025-04-01 21:55:28 Code: 210. DB::NetException: Connection reset by peer, while reading from socket (peer: [::1]:9000, local: [::1]:60356): (localhost:9000, ::1, local address: [::1]:60356). (NETWORK_ERROR)
2025-04-01 21:55:28
2025-04-01 21:55:28 , result:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 stdout:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 Settings used in the test: --max_insert_threads 1 --group_by_two_level_threshold 926968 --group_by_two_level_threshold_bytes 50000000 --distributed_aggregation_memory_efficient 1 --fsync_metadata 1 --output_format_parallel_formatting 1 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 15695912 --max_read_buffer_size 530186 --prefer_localhost_replica 0 --max_block_size 88155 --max_joined_block_size_rows 26541 --max_threads 1 --optimize_append_index 1 --optimize_if_chain_to_multiif 1 --optimize_if_transform_strings_to_enum 0 --optimize_read_in_order 1 --optimize_or_like_chain 0 --optimize_substitute_columns 0 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 72 --optimize_aggregation_in_order 0 --aggregation_in_order_max_block_bytes 3707684 --use_uncompressed_cache 1 --min_bytes_to_use_direct_io 10737418240 --min_bytes_to_use_mmap_io 1 --local_filesystem_read_method io_uring --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 1 --filesystem_cache_segments_batch_size 100 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 1 --throw_on_error_from_cache_on_write_operations 0 --remote_filesystem_read_prefetch 1 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 32Mi --filesystem_prefetches_limit 0 --filesystem_prefetch_min_bytes_for_single_read_task 16Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 0 --compile_sort_description 0 --merge_tree_coarse_index_granularity 23 --optimize_distinct_in_order 1 --max_bytes_before_external_sort 0 --max_bytes_before_external_group_by 10737418240 --max_bytes_before_remerge_sort 1348609768 --min_compress_block_size 17882 --max_compress_block_size 98502 --merge_tree_compact_parts_min_granules_to_multibuffer_read 116 --optimize_sorting_by_input_stream_properties 0 --http_response_buffer_size 1057426 --http_wait_end_of_query True --enable_memory_bound_merging_of_aggregation_results 1 --min_count_to_compile_expression 0 --min_count_to_compile_aggregate_expression 3 --min_count_to_compile_sort_description 0 --session_timezone Atlantic/Azores --use_page_cache_for_disks_without_file_cache False --page_cache_inject_eviction True --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.51 --prefer_external_sort_block_bytes 0 --cross_join_min_rows_to_compress 0 --cross_join_min_bytes_to_compress 100000000 --min_external_table_block_size_bytes 0 --max_parsing_threads 10 --optimize_functions_to_subcolumns 0 --parallel_replicas_local_plan 0 --query_plan_join_swap_table auto --output_format_native_write_json_as_string 0 --enable_vertical_final 1
2025-04-01 21:55:28
2025-04-01 21:55:28 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 1.0 --prefer_fetch_merged_part_size_threshold 10737418240 --vertical_merge_algorithm_min_rows_to_activate 1 --vertical_merge_algorithm_min_columns_to_activate 100 --allow_vertical_merges_from_compact_to_wide_parts 1 --min_merge_bytes_to_use_direct_io 1076039735 --index_granularity_bytes 11093201 --merge_max_block_size 4674 --index_granularity 27013 --min_bytes_for_wide_part 1048833509 --marks_compress_block_size 99118 --primary_key_compress_block_size 84378 --replace_long_file_name_to_hash 0 --max_file_name_length 0 --min_bytes_for_full_part_storage 511408781 --compact_parts_max_bytes_to_buffer 24338567 --compact_parts_max_granules_to_buffer 70 --compact_parts_merge_max_bytes_to_prefetch_part 22377952 --cache_populated_by_fetch 1 --concurrent_part_removal_threshold 14 --old_parts_lifetime 10 --prewarm_mark_cache 1 --use_const_adaptive_granularity 0 --enable_index_granularity_compression 0 --use_primary_key_cache 0 --prewarm_primary_key_cache 1
2025-04-01 21:55:28
2025-04-01 21:55:28 Database: test_pcb8eq08
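Return code 210 here is a client-side NETWORK_ERROR (connection reset by peer on port 9000) rather than a wrong result from GROUP BY ALL itself; together with the "Connection refused" in the next failure and the "server died" crash recorded under 01099_parallel_distributed_insert_select below, it is consistent with the server aborting around this point in the run. For reference, GROUP BY ALL groups by every non-aggregate expression in the SELECT list; a minimal sketch of the feature (not the test file):

    -- GROUP BY ALL expands to the non-aggregate SELECT expressions (here: s).
    SELECT s, count()
    FROM (SELECT arrayJoin(['a', 'a', 'b']) AS s)
    GROUP BY ALL
    ORDER BY s;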
2025-04-01 21:55:28 00429_long_http_bufferization: [ FAIL ] 182.05 sec.
2025-04-01 21:55:28 Reason: return code: 210
2025-04-01 21:55:28 Error on processing query: Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR) (version 24.12.2.20221.altinityantalya (altinity build))
2025-04-01 21:55:28 (query: SELECT greatest(toUInt8(1), toUInt8(intHash64(number))) FROM system.numbers LIMIT 3500000 FORMAT RowBinary)
2025-04-01 21:55:28 , result:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 stdout:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 Settings used in the test: --max_insert_threads 3 --group_by_two_level_threshold 1 --group_by_two_level_threshold_bytes 1 --distributed_aggregation_memory_efficient 0 --fsync_metadata 1 --output_format_parallel_formatting 1 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 10557050 --max_read_buffer_size 687669 --prefer_localhost_replica 1 --max_block_size 31146 --max_joined_block_size_rows 40801 --max_threads 1 --optimize_append_index 1 --optimize_if_chain_to_multiif 0 --optimize_if_transform_strings_to_enum 0 --optimize_read_in_order 1 --optimize_or_like_chain 0 --optimize_substitute_columns 0 --enable_multiple_prewhere_read_steps 0 --read_in_order_two_level_merge_threshold 54 --optimize_aggregation_in_order 0 --aggregation_in_order_max_block_bytes 43715243 --use_uncompressed_cache 0 --min_bytes_to_use_direct_io 1 --min_bytes_to_use_mmap_io 10737418240 --local_filesystem_read_method pread --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 0 --filesystem_cache_segments_batch_size 0 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 --throw_on_error_from_cache_on_write_operations 1 --remote_filesystem_read_prefetch 1 --allow_prefetched_read_pool_for_remote_filesystem 0 --filesystem_prefetch_max_memory_usage 32Mi --filesystem_prefetches_limit 0 --filesystem_prefetch_min_bytes_for_single_read_task 8Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 0 --compile_sort_description 1 --merge_tree_coarse_index_granularity 16 --optimize_distinct_in_order 1 --max_bytes_before_external_sort 10737418240 --max_bytes_before_external_group_by 10737418240 --max_bytes_before_remerge_sort 2495388058 --min_compress_block_size 29806 --max_compress_block_size 2959610 --merge_tree_compact_parts_min_granules_to_multibuffer_read 24 --optimize_sorting_by_input_stream_properties 0 --http_response_buffer_size 5746511 --http_wait_end_of_query True --enable_memory_bound_merging_of_aggregation_results 1 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 3 --session_timezone America/Mazatlan --use_page_cache_for_disks_without_file_cache True --page_cache_inject_eviction False --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.25 --prefer_external_sort_block_bytes 100000000 --cross_join_min_rows_to_compress 100000000 --cross_join_min_bytes_to_compress 100000000 --min_external_table_block_size_bytes 100000000 --max_parsing_threads 10 --optimize_functions_to_subcolumns 1 --parallel_replicas_local_plan 1 --query_plan_join_swap_table false --output_format_native_write_json_as_string 1 --enable_vertical_final 0
2025-04-01 21:55:28
2025-04-01 21:55:28 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 0.0 --prefer_fetch_merged_part_size_threshold 5732284980 --vertical_merge_algorithm_min_rows_to_activate 500882 --vertical_merge_algorithm_min_columns_to_activate 5 --allow_vertical_merges_from_compact_to_wide_parts 0 --min_merge_bytes_to_use_direct_io 1 --index_granularity_bytes 12253727 --merge_max_block_size 18433 --index_granularity 28366 --min_bytes_for_wide_part 0 --marks_compress_block_size 71361 --primary_key_compress_block_size 62557 --replace_long_file_name_to_hash 0 --max_file_name_length 99 --min_bytes_for_full_part_storage 0 --compact_parts_max_bytes_to_buffer 12263385 --compact_parts_max_granules_to_buffer 57 --compact_parts_merge_max_bytes_to_prefetch_part 16824239 --cache_populated_by_fetch 0 --concurrent_part_removal_threshold 14 --old_parts_lifetime 10 --prewarm_mark_cache 1 --use_const_adaptive_granularity 0 --enable_index_granularity_compression 1 --use_primary_key_cache 0 --prewarm_primary_key_cache 0
2025-04-01 21:55:28
2025-04-01 21:55:28 Database: test_df5g6uh0
2025-04-01 21:55:28 01099_parallel_distributed_insert_select: [ FAIL ] 43.33 sec.
2025-04-01 21:55:28 Reason: server died 32
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:41.649760 [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Logical error: 'Replica info is not initialized'.
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.009370 [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Stack trace (when copying this message, always include the lines below):
2025-04-01 21:55:28
2025-04-01 21:55:28 0. ./contrib/llvm-project/libcxx/include/exception:141: Poco::Exception::Exception(String const&, int) @ 0x00000000456d53c9
2025-04-01 21:55:28 1. ./build_docker/./src/Common/Exception.cpp:105: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x00000000206d2853
2025-04-01 21:55:28 2. DB::Exception::Exception(PreformattedMessage&&, int) @ 0x00000000088cd1d5
2025-04-01 21:55:28 3. DB::Exception::Exception<>(int, FormatStringHelperImpl<>) @ 0x00000000088f4804
2025-04-01 21:55:28 4. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:727: DB::RemoteQueryExecutor::processReadTaskRequest() @ 0x0000000034b283cd
2025-04-01 21:55:28 5. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:623: DB::RemoteQueryExecutor::processPacket(DB::Packet) @ 0x0000000034b21488
2025-04-01 21:55:28 6. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:562: DB::RemoteQueryExecutor::readAsync() @ 0x0000000034b25dc3
2025-04-01 21:55:28 7. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:182: DB::RemoteSource::tryGenerate() @ 0x0000000040415925
2025-04-01 21:55:28 8. ./build_docker/./src/Processors/ISource.cpp:108: DB::ISource::work() @ 0x000000003f6e8571
2025-04-01 21:55:28 9. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:134: DB::RemoteSource::work() @ 0x0000000040414fb0
2025-04-01 21:55:28 10. ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:49: DB::ExecutionThreadContext::executeTask() @ 0x000000003f7538b1
2025-04-01 21:55:28 11. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:290: DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x000000003f71eb14
2025-04-01 21:55:28 12. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:256: DB::PipelineExecutor::executeImpl(unsigned long, bool) @ 0x000000003f71c8c3
2025-04-01 21:55:28 13. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:127: DB::PipelineExecutor::execute(unsigned long, bool) @ 0x000000003f71c2f5
2025-04-01 21:55:28 14. ./build_docker/./src/Processors/Executors/CompletedPipelineExecutor.cpp:49: void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl(DB::CompletedPipelineExecutor::execute()::$_0&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000003f719de5
2025-04-01 21:55:28 15. ./contrib/llvm-project/libcxx/include/__functional/function.h:848: ? @ 0x00000000209762ca
2025-04-01 21:55:28 16. ./contrib/llvm-project/libcxx/include/__functional/invoke.h:359: ? @ 0x0000000020988a64
2025-04-01 21:55:28 17. ? @ 0x00007fab77e4dac3
2025-04-01 21:55:28 18. ? @ 0x00007fab77edf850
2025-04-01 21:55:28
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.015418 [ 26802 ] BaseDaemon: ########################################
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.016232 [ 26802 ] BaseDaemon: (version 24.12.2.20221.altinityantalya (altinity build), build id: E3EBE392F6E30F869D3C8A7C787AA68127B123A4, git hash: 82252d159dc02cab0f366aaa5691adc1545dd11d) (from thread 4059) (query_id: 36738c40-c9bd-49ff-8c5d-a6cb75e0448d) (query: INSERT INTO distributed_01099_b SELECT * FROM urlCluster('test_cluster_two_shards', 'http://localhost:8123/?query=select+{1,2,3}+format+TSV', 'TSV', 's String');) Received signal Aborted (6)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.018028 [ 26802 ] BaseDaemon:
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.018491 [ 26802 ] BaseDaemon: Stack trace: 0x000056504274ba5e 0x00005650431471b7 0x000056502a871bae 0x00007fab77dfb520 0x00007fab77e4f9fd 0x00007fab77dfb476 0x00007fab77de17f3 0x000056504269d48b 0x000056504269ee61 0x000056502a8991d5 0x000056502a8c0804 0x0000565056af43cd 0x0000565056aed488 0x0000565056af1dc3 0x00005650623e1925 0x00005650616b4571 0x00005650623e0fb0 0x000056506171f8b1 0x00005650616eab14 0x00005650616e88c3 0x00005650616e82f5 0x00005650616e5de5 0x00005650429422ca 0x0000565042954a64 0x00007fab77e4dac3 0x00007fab77edf850
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.342953 [ 26802 ] BaseDaemon: 0.0. inlined from ./build_docker/./src/Common/StackTrace.cpp:381: StackTrace::tryCapture()
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.343710 [ 26802 ] BaseDaemon: 0. ./build_docker/./src/Common/StackTrace.cpp:350: StackTrace::StackTrace(ucontext_t const&) @ 0x000000002077fa5e
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:42.818722 [ 26802 ] BaseDaemon: 1. ./build_docker/./src/Common/SignalHandlers.cpp:102: signalHandler(int, siginfo_t*, void*) @ 0x000000002117b1b7
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:46.216005 [ 26802 ] BaseDaemon: 2. SignalAction(int, void*, void*) @ 0x00000000088a5bae
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:46.216211 [ 26802 ] BaseDaemon: 3. ? @ 0x00007fab77dfb520
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:46.219608 [ 26802 ] BaseDaemon: 4. ? @ 0x00007fab77e4f9fd
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:46.219787 [ 26802 ] BaseDaemon: 5. ? @ 0x00007fab77dfb476
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:46.219899 [ 26802 ] BaseDaemon: 6. ? @ 0x00007fab77de17f3
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:46.819038 [ 26802 ] BaseDaemon: 7. ./build_docker/./src/Common/Exception.cpp:48: DB::abortOnFailedAssertion(String const&, void* const*, unsigned long, unsigned long) @ 0x00000000206d148b
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:47.419382 [ 26802 ] BaseDaemon: 8.0. inlined from ./build_docker/./src/Common/Exception.cpp:70: DB::handle_error_code(String const&, int, bool, std::vector> const&)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:47.419544 [ 26802 ] BaseDaemon: 8. ./build_docker/./src/Common/Exception.cpp:111: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x00000000206d2e61
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:47.702502 [ 26802 ] BaseDaemon: 9. DB::Exception::Exception(PreformattedMessage&&, int) @ 0x00000000088cd1d5
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:47.986515 [ 26802 ] BaseDaemon: 10. DB::Exception::Exception<>(int, FormatStringHelperImpl<>) @ 0x00000000088f4804
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:49.350065 [ 26802 ] BaseDaemon: 11. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:727: DB::RemoteQueryExecutor::processReadTaskRequest() @ 0x0000000034b283cd
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:50.553569 [ 26802 ] BaseDaemon: 12. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:623: DB::RemoteQueryExecutor::processPacket(DB::Packet) @ 0x0000000034b21488
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:51.825748 [ 26802 ] BaseDaemon: 13. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:562: DB::RemoteQueryExecutor::readAsync() @ 0x0000000034b25dc3
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:52.214273 [ 26802 ] BaseDaemon: 14. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:182: DB::RemoteSource::tryGenerate() @ 0x0000000040415925
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:52.460221 [ 26802 ] BaseDaemon: 15. ./build_docker/./src/Processors/ISource.cpp:108: DB::ISource::work() @ 0x000000003f6e8571
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:52.755429 [ 26802 ] BaseDaemon: 16. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:134: DB::RemoteSource::work() @ 0x0000000040414fb0
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:52.926172 [ 26802 ] BaseDaemon: 17.0. inlined from ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:49: DB::executeJob(DB::ExecutingGraph::Node*, DB::ReadProgressCallback*)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:52.926340 [ 26802 ] BaseDaemon: 17. ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:98: DB::ExecutionThreadContext::executeTask() @ 0x000000003f7538b1
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:53.427953 [ 26802 ] BaseDaemon: 18. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:290: DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x000000003f71eb14
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:53.771735 [ 26802 ] BaseDaemon: 19.0. inlined from ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:256: DB::PipelineExecutor::executeSingleThread(unsigned long)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:53.771998 [ 26802 ] BaseDaemon: 19. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:442: DB::PipelineExecutor::executeImpl(unsigned long, bool) @ 0x000000003f71c8c3
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.124930 [ 26802 ] BaseDaemon: 20. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:127: DB::PipelineExecutor::execute(unsigned long, bool) @ 0x000000003f71c2f5
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.311733 [ 26802 ] BaseDaemon: 21.0. inlined from ./build_docker/./src/Processors/Executors/CompletedPipelineExecutor.cpp:49: DB::threadFunction(DB::CompletedPipelineExecutor::Data&, std::shared_ptr, unsigned long, bool)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.311885 [ 26802 ] BaseDaemon: 21.1. inlined from ./build_docker/./src/Processors/Executors/CompletedPipelineExecutor.cpp:89: operator()
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.311992 [ 26802 ] BaseDaemon: 21.2. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:394: ?
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.312152 [ 26802 ] BaseDaemon: 21.3. inlined from ./contrib/llvm-project/libcxx/include/tuple:1789: _ZNSt3__118__apply_tuple_implB6v15007IRZN2DB25CompletedPipelineExecutor7executeEvE3$_0RNS_5tupleIJEEETpTnmJEEEDcOT_OT0_NS_15__tuple_indicesIJXspT1_EEEE
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.312328 [ 26802 ] BaseDaemon: 21.4. inlined from ./contrib/llvm-project/libcxx/include/tuple:1798: decltype(auto) std::apply[abi:v15007]&>(DB::CompletedPipelineExecutor::execute()::$_0&, std::tuple<>&)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.312425 [ 26802 ] BaseDaemon: 21.5. inlined from ./src/Common/ThreadPool.h:311: operator()
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.312502 [ 26802 ] BaseDaemon: 21.6. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:394: ?
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.312579 [ 26802 ] BaseDaemon: 21.7. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:479: ?
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.312669 [ 26802 ] BaseDaemon: 21.8. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:235: ?
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.312752 [ 26802 ] BaseDaemon: 21. ./contrib/llvm-project/libcxx/include/__functional/function.h:716: ? @ 0x000000003f719de5
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.589931 [ 26802 ] BaseDaemon: 22.0. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:848: ?
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.590094 [ 26802 ] BaseDaemon: 22.1. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:1197: ?
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:54.590199 [ 26802 ] BaseDaemon: 22. ./build_docker/./src/Common/ThreadPool.cpp:785: ThreadPoolImpl::ThreadFromThreadPool::worker() @ 0x00000000209762ca
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:55.076568 [ 26802 ] BaseDaemon: 23.0. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:359: ?
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:55.077147 [ 26802 ] BaseDaemon: 23.1. inlined from ./contrib/llvm-project/libcxx/include/thread:284: void std::__thread_execute[abi:v15007]>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImpl::ThreadFromThreadPool*, 2ul>(std::tuple>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImpl::ThreadFromThreadPool*>&, std::__tuple_indices<2ul>)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:55.077380 [ 26802 ] BaseDaemon: 23. ./contrib/llvm-project/libcxx/include/thread:295: void* std::__thread_proxy[abi:v15007]>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImpl::ThreadFromThreadPool*>>(void*) @ 0x0000000020988a64
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:55.077615 [ 26802 ] BaseDaemon: 24. ? @ 0x00007fab77e4dac3
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:54:55.077747 [ 26802 ] BaseDaemon: 25. ? @ 0x00007fab77edf850
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:55:05.231487 [ 26802 ] BaseDaemon: Integrity check of the executable successfully passed (checksum: 003D0F8422331117266F6835F6AB47FF)
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:55:14.873946 [ 26802 ] BaseDaemon: This ClickHouse version is not official and should be upgraded to the official build.
2025-04-01 21:55:28 [c6c6cb6f00e6] 2025.04.01 14:55:14.875484 [ 26802 ] BaseDaemon: Changed settings: min_compress_block_size = 2676963, max_compress_block_size = 1361695, max_block_size = 23650, min_external_table_block_size_bytes = 100000000, max_joined_block_size_rows = 26715, max_insert_threads = 1, max_threads = 2, max_parsing_threads = 1, max_read_buffer_size = 971032, connect_timeout_with_failover_ms = 2000, connect_timeout_with_failover_secure_ms = 3000, idle_connection_timeout = 36000, s3_max_get_rps = 1000000, s3_max_get_burst = 2000000, s3_max_put_rps = 1000000, s3_max_put_burst = 2000000, s3_check_objects_after_upload = true, use_uncompressed_cache = true, max_remote_read_network_bandwidth = 1000000000000, max_remote_write_network_bandwidth = 1000000000000, max_local_read_bandwidth = 1000000000000, max_local_write_bandwidth = 1000000000000, stream_like_engine_allow_direct_select = true, enable_multiple_prewhere_read_steps = false, replication_wait_for_inactive_replica_timeout = 30, min_count_to_compile_expression = 0, min_count_to_compile_aggregate_expression = 0, compile_sort_description = false, group_by_two_level_threshold = 1000000, group_by_two_level_threshold_bytes = 1, enable_memory_bound_merging_of_aggregation_results = false, allow_nonconst_timezone_arguments = true, parallel_distributed_insert_select = 1, min_chunk_bytes_for_parallel_parsing = 6692138, merge_tree_coarse_index_granularity = 28, min_bytes_to_use_direct_io = 10737418240, min_bytes_to_use_mmap_io = 8983766593, log_queries = true, insert_quorum_timeout = 60000, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.9200000166893005, http_response_buffer_size = 3906084, fsync_metadata = true, query_plan_join_swap_table = true, http_send_timeout = 60., http_receive_timeout = 60., use_index_for_in_with_subqueries_max_values = 1000000000, opentelemetry_start_trace_probability = 0.10000000149011612, enable_vertical_final = false, max_rows_to_read = 20000000, max_bytes_to_read = 1000000000000, max_bytes_to_read_leaf = 1000000000000, max_rows_to_group_by = 10000000000, max_bytes_before_external_group_by = 0, max_rows_to_sort = 10000000000, max_bytes_to_sort = 10000000000, prefer_external_sort_block_bytes = 1, max_bytes_before_external_sort = 10737418240, max_bytes_before_remerge_sort = 2611551233, max_result_rows = 1000000000, max_result_bytes = 1000000000, max_execution_time = 600., max_execution_time_leaf = 600., max_execution_speed = 100000000000, max_execution_speed_bytes = 10000000000000, timeout_before_checking_execution_speed = 300., max_estimated_execution_time = 600., max_columns_to_read = 20000, max_temporary_columns = 20000, max_temporary_non_const_columns = 20000, max_rows_in_set = 10000000000, max_bytes_in_set = 10000000000, max_rows_in_join = 10000000000, max_bytes_in_join = 10000000000, cross_join_min_rows_to_compress = 100000000, cross_join_min_bytes_to_compress = 0, max_rows_to_transfer = 1000000000, max_bytes_to_transfer = 1000000000, max_rows_in_distinct = 10000000000, max_bytes_in_distinct = 10000000000, max_memory_usage = 5000000000, max_memory_usage_for_user = 32000000000, max_untracked_memory = 1048576, memory_profiler_step = 1048576, max_network_bandwidth = 100000000000, max_network_bytes = 1000000000000, max_network_bandwidth_for_user = 100000000000, max_network_bandwidth_for_all_users = 100000000000, max_temporary_data_on_disk_size_for_user = 100000000000, max_temporary_data_on_disk_size_for_query = 100000000000, max_backup_bandwidth = 100000000000, 
log_comment = '01099_parallel_distributed_insert_select.sql', send_logs_level = 'error', prefer_localhost_replica = false, optimize_read_in_order = false, aggregation_in_order_max_block_bytes = 20435763, read_in_order_two_level_merge_threshold = 93, max_hyperscan_regexp_length = 1000000, max_hyperscan_regexp_total_length = 10000000, allow_introspection_functions = true, database_atomic_wait_for_drop_and_detach_synchronously = true, optimize_if_transform_strings_to_enum = true, optimize_substitute_columns = true, query_cache_max_size_in_bytes = 10000000, query_cache_max_entries = 100000, distributed_ddl_entry_format_version = 6, external_storage_max_read_rows = 10000000000, external_storage_max_read_bytes = 10000000000, local_filesystem_read_method = 'read', local_filesystem_read_prefetch = true, merge_tree_min_bytes_per_task_for_remote_reading = 1048576, merge_tree_compact_parts_min_granules_to_multibuffer_read = 31, async_insert_busy_timeout_max_ms = 5000, enable_filesystem_cache = true, enable_filesystem_cache_on_write_operations = true, read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true, throw_on_error_from_cache_on_write_operations = true, filesystem_cache_segments_batch_size = 3, use_page_cache_for_disks_without_file_cache = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, allow_prefetched_read_pool_for_local_filesystem = false, filesystem_prefetch_step_bytes = 104857600, filesystem_prefetch_step_marks = 50, filesystem_prefetch_max_memory_usage = 67108864, filesystem_prefetches_limit = 10, max_streams_for_merge_tree_reading = 1000, insert_keeper_max_retries = 100, insert_keeper_retry_initial_backoff_ms = 1, insert_keeper_retry_max_backoff_ms = 10, insert_keeper_fault_injection_probability = 0.009999999776482582, allow_experimental_parallel_reading_from_replicas = 0, parallel_replicas_local_plan = false, session_timezone = 'Mexico/BajaSur'
2025-04-01 21:55:28 Error on processing query: Code: 32. DB::Exception: Attempt to read after eof: while receiving packet from localhost:9000. (ATTEMPT_TO_READ_AFTER_EOF) (version 24.12.2.20221.altinityantalya (altinity build))
2025-04-01 21:55:28 (query: INSERT INTO distributed_01099_b SELECT * FROM urlCluster('test_cluster_two_shards', 'http://localhost:8123/?query=select+{1,2,3}+format+TSV', 'TSV', 's String');)
2025-04-01 21:55:28 , result:
2025-04-01 21:55:28
2025-04-01 21:55:28 parallel_distributed_insert_select=1
2025-04-01 21:55:28 test_shard_localhost
2025-04-01 21:55:28 0
2025-04-01 21:55:28 1
2025-04-01 21:55:28 2
2025-04-01 21:55:28 test_cluster_two_shards_localhost
2025-04-01 21:55:28 0 2
2025-04-01 21:55:28 1 2
2025-04-01 21:55:28 2 2
2025-04-01 21:55:28 test_cluster_two_shards
2025-04-01 21:55:28 distributed
2025-04-01 21:55:28 local
2025-04-01 21:55:28 0 2
2025-04-01 21:55:28 1 2
2025-04-01 21:55:28 2 2
2025-04-01 21:55:28 distributed
2025-04-01 21:55:28 0 4
2025-04-01 21:55:28 1 4
2025-04-01 21:55:28 2 4
2025-04-01 21:55:28 test_cluster_1_shard_3_replicas_1_unavailable
2025-04-01 21:55:28 distributed
2025-04-01 21:55:28 local
2025-04-01 21:55:28 test_cluster_1_shard_3_replicas_1_unavailable with storageCluster
2025-04-01 21:55:28
2025-04-01 21:55:28 stdout:
2025-04-01 21:55:28 parallel_distributed_insert_select=1
2025-04-01 21:55:28 test_shard_localhost
2025-04-01 21:55:28 0
2025-04-01 21:55:28 1
2025-04-01 21:55:28 2
2025-04-01 21:55:28 test_cluster_two_shards_localhost
2025-04-01 21:55:28 0 2
2025-04-01 21:55:28 1 2
2025-04-01 21:55:28 2 2
2025-04-01 21:55:28 test_cluster_two_shards
2025-04-01 21:55:28 distributed
2025-04-01 21:55:28 local
2025-04-01 21:55:28 0 2
2025-04-01 21:55:28 1 2
2025-04-01 21:55:28 2 2
2025-04-01 21:55:28 distributed
2025-04-01 21:55:28 0 4
2025-04-01 21:55:28 1 4
2025-04-01 21:55:28 2 4
2025-04-01 21:55:28 test_cluster_1_shard_3_replicas_1_unavailable
2025-04-01 21:55:28 distributed
2025-04-01 21:55:28 local
2025-04-01 21:55:28 test_cluster_1_shard_3_replicas_1_unavailable with storageCluster
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 Settings used in the test: --max_insert_threads 1 --group_by_two_level_threshold 1000000 --group_by_two_level_threshold_bytes 1 --distributed_aggregation_memory_efficient 1 --fsync_metadata 1 --output_format_parallel_formatting 1 --input_format_parallel_parsing 1 --min_chunk_bytes_for_parallel_parsing 6692138 --max_read_buffer_size 971032 --prefer_localhost_replica 1 --max_block_size 23650 --max_joined_block_size_rows 26715 --max_threads 2 --optimize_append_index 0 --optimize_if_chain_to_multiif 0 --optimize_if_transform_strings_to_enum 1 --optimize_read_in_order 0 --optimize_or_like_chain 0 --optimize_substitute_columns 1 --enable_multiple_prewhere_read_steps 0 --read_in_order_two_level_merge_threshold 93 --optimize_aggregation_in_order 0 --aggregation_in_order_max_block_bytes 20435763 --use_uncompressed_cache 1 --min_bytes_to_use_direct_io 10737418240 --min_bytes_to_use_mmap_io 8983766593 --local_filesystem_read_method read --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 1 --filesystem_cache_segments_batch_size 3 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 1 --throw_on_error_from_cache_on_write_operations 1 --remote_filesystem_read_prefetch 1 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 64Mi --filesystem_prefetches_limit 10 --filesystem_prefetch_min_bytes_for_single_read_task 1Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 100Mi --compile_aggregate_expressions 1 --compile_sort_description 0 --merge_tree_coarse_index_granularity 28 --optimize_distinct_in_order 1 --max_bytes_before_external_sort 10737418240 --max_bytes_before_external_group_by 0 --max_bytes_before_remerge_sort 2611551233 --min_compress_block_size 2676963 --max_compress_block_size 1361695 --merge_tree_compact_parts_min_granules_to_multibuffer_read 31 --optimize_sorting_by_input_stream_properties 1 --http_response_buffer_size 3906084 --http_wait_end_of_query False --enable_memory_bound_merging_of_aggregation_results 0 --min_count_to_compile_expression 0 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 3 --session_timezone Mexico/BajaSur --use_page_cache_for_disks_without_file_cache True --page_cache_inject_eviction False --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.92 --prefer_external_sort_block_bytes 1 --cross_join_min_rows_to_compress 100000000 --cross_join_min_bytes_to_compress 0 --min_external_table_block_size_bytes 100000000 --max_parsing_threads 1 --optimize_functions_to_subcolumns 1 --parallel_replicas_local_plan 0 --query_plan_join_swap_table true --output_format_native_write_json_as_string 0 --enable_vertical_final 0
2025-04-01 21:55:28
2025-04-01 21:55:28 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 1.0 --prefer_fetch_merged_part_size_threshold 10737418240 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 1 --allow_vertical_merges_from_compact_to_wide_parts 0 --min_merge_bytes_to_use_direct_io 4959440889 --index_granularity_bytes 25386338 --merge_max_block_size 2181 --index_granularity 25565 --min_bytes_for_wide_part 805531677 --marks_compress_block_size 77315 --primary_key_compress_block_size 70210 --replace_long_file_name_to_hash 0 --max_file_name_length 0 --min_bytes_for_full_part_storage 172184581 --compact_parts_max_bytes_to_buffer 319527718 --compact_parts_max_granules_to_buffer 183 --compact_parts_merge_max_bytes_to_prefetch_part 16732535 --cache_populated_by_fetch 0 --concurrent_part_removal_threshold 64 --old_parts_lifetime 480 --prewarm_mark_cache 0 --use_const_adaptive_granularity 0 --enable_index_granularity_compression 1 --use_primary_key_cache 0 --prewarm_primary_key_cache 0
2025-04-01 21:55:28
2025-04-01 21:55:28 Database: test_y0bxm8fu
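For reference, the 01099_parallel_distributed_insert_select crash above comes down to the single INSERT quoted in the BaseDaemon report. The sketch below is a minimal, hypothetical reproduction, not the test itself: the SET is taken from the "Changed settings" dump (parallel_distributed_insert_select = 1), the INSERT is copied verbatim from the log, and the two CREATE TABLE statements are assumptions standing in for whatever tables the test actually creates, assuming the standard test_cluster_two_shards cluster from the CI server config.

    -- Hypothetical stand-ins for the test's tables; only the SET and the INSERT below are taken from the log.
    CREATE TABLE local_01099_b (s String) ENGINE = MergeTree ORDER BY s;
    CREATE TABLE distributed_01099_b AS local_01099_b
        ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), 'local_01099_b', rand());

    SET parallel_distributed_insert_select = 1;  -- active in the failing run, per the "Changed settings" line

    -- This statement triggered "Logical error: 'Replica info is not initialized'"
    -- inside RemoteQueryExecutor::processReadTaskRequest in this run:
    INSERT INTO distributed_01099_b
    SELECT * FROM urlCluster('test_cluster_two_shards', 'http://localhost:8123/?query=select+{1,2,3}+format+TSV', 'TSV', 's String');

Frame 7 of the BaseDaemon trace (DB::abortOnFailedAssertion) shows why the whole server died: in this build a logical error aborts the process instead of merely failing the query, which is what the subsequent tests then report as "server died".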
2025-04-01 21:55:28 00861_decimal_quoted_csv: [ FAIL ] 1.31 sec.
2025-04-01 21:55:28 Reason: return code: 210
2025-04-01 21:55:28 Code: 210. DB::NetException: Connection reset by peer, while reading from socket (peer: [::1]:9000, local: [::1]:60336): (localhost:9000, ::1, local address: [::1]:60336). (NETWORK_ERROR)
2025-04-01 21:55:28
2025-04-01 21:55:28 , result:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 stdout:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 Settings used in the test: --max_insert_threads 1 --group_by_two_level_threshold 103548 --group_by_two_level_threshold_bytes 25512116 --distributed_aggregation_memory_efficient 1 --fsync_metadata 1 --output_format_parallel_formatting 0 --input_format_parallel_parsing 1 --min_chunk_bytes_for_parallel_parsing 14823653 --max_read_buffer_size 887739 --prefer_localhost_replica 1 --max_block_size 91871 --max_joined_block_size_rows 81863 --max_threads 1 --optimize_append_index 0 --optimize_if_chain_to_multiif 0 --optimize_if_transform_strings_to_enum 1 --optimize_read_in_order 1 --optimize_or_like_chain 1 --optimize_substitute_columns 0 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 49 --optimize_aggregation_in_order 1 --aggregation_in_order_max_block_bytes 10702132 --use_uncompressed_cache 0 --min_bytes_to_use_direct_io 10737418240 --min_bytes_to_use_mmap_io 10737418240 --local_filesystem_read_method io_uring --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 1 --filesystem_cache_segments_batch_size 1 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 1 --throw_on_error_from_cache_on_write_operations 1 --remote_filesystem_read_prefetch 0 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 64Mi --filesystem_prefetches_limit 0 --filesystem_prefetch_min_bytes_for_single_read_task 8Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 100Mi --compile_aggregate_expressions 0 --compile_sort_description 1 --merge_tree_coarse_index_granularity 21 --optimize_distinct_in_order 1 --max_bytes_before_external_sort 2379518480 --max_bytes_before_external_group_by 0 --max_bytes_before_remerge_sort 423168541 --min_compress_block_size 582579 --max_compress_block_size 2839814 --merge_tree_compact_parts_min_granules_to_multibuffer_read 73 --optimize_sorting_by_input_stream_properties 1 --http_response_buffer_size 5588374 --http_wait_end_of_query True --enable_memory_bound_merging_of_aggregation_results 1 --min_count_to_compile_expression 0 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 0 --session_timezone America/Hermosillo --use_page_cache_for_disks_without_file_cache True --page_cache_inject_eviction True --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.41 --prefer_external_sort_block_bytes 1 --cross_join_min_rows_to_compress 1 --cross_join_min_bytes_to_compress 1 --min_external_table_block_size_bytes 0 --max_parsing_threads 10 --optimize_functions_to_subcolumns 1 --parallel_replicas_local_plan 1 --query_plan_join_swap_table true --output_format_native_write_json_as_string 0 --enable_vertical_final 0
2025-04-01 21:55:28
2025-04-01 21:55:28 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 0.7128994582603441 --prefer_fetch_merged_part_size_threshold 10737418240 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 100 --allow_vertical_merges_from_compact_to_wide_parts 1 --min_merge_bytes_to_use_direct_io 10737418240 --index_granularity_bytes 1849928 --merge_max_block_size 1102 --index_granularity 7768 --min_bytes_for_wide_part 529925453 --marks_compress_block_size 67130 --primary_key_compress_block_size 91534 --replace_long_file_name_to_hash 0 --max_file_name_length 26 --min_bytes_for_full_part_storage 499192991 --compact_parts_max_bytes_to_buffer 466813739 --compact_parts_max_granules_to_buffer 66 --compact_parts_merge_max_bytes_to_prefetch_part 16017412 --cache_populated_by_fetch 0 --concurrent_part_removal_threshold 72 --old_parts_lifetime 480 --prewarm_mark_cache 0 --use_const_adaptive_granularity 1 --enable_index_granularity_compression 0 --use_primary_key_cache 0 --prewarm_primary_key_cache 0
2025-04-01 21:55:28
2025-04-01 21:55:28 Database: test_6fmyolxd
2025-04-01 21:55:28 00700_decimal_casts_2: [ FAIL ] 12.88 sec.
2025-04-01 21:55:28 Reason: return code: 32
2025-04-01 21:55:28 Expected server error code '407' but got no server error (query: SELECT toDecimal128('-1', 7) AS x, toUInt8(x); -- { serverError DECIMAL_OVERFLOW }).
2025-04-01 21:55:28 Error on processing query: Code: 32. DB::Exception: Attempt to read after eof: while receiving packet from localhost:9000. (ATTEMPT_TO_READ_AFTER_EOF) (version 24.12.2.20221.altinityantalya (altinity build))
2025-04-01 21:55:28 (query: SELECT toDecimal128('-1', 7) AS x, toUInt8(x); -- { serverError DECIMAL_OVERFLOW })
2025-04-01 21:55:28 , result:
2025-04-01 21:55:28
2025-04-01 21:55:28 1234567890 1234567890 1234567890
2025-04-01 21:55:28 -126561577.683753853853498429727072845824
2025-04-01 21:55:28 1234567890 1234567890 1234567890
2025-04-01 21:55:28 12345678 12345678 12345678
2025-04-01 21:55:28 9223372036854775807 9223372036854775807 -9223372036854775807
2025-04-01 21:55:28 9223372036854775800 9223372036854775800 -9223372036854775800
2025-04-01 21:55:28 92233720368547758 92233720368547758 -92233720368547758
2025-04-01 21:55:28 2147483647 2147483647 -2147483647
2025-04-01 21:55:28 2147483647 2147483647 -2147483647
2025-04-01 21:55:28 92233720368547757.99 92233720368547757 -92233720368547757
2025-04-01 21:55:28 2147483640.99 2147483640 -2147483640
2025-04-01 21:55:28 -0.9 0
2025-04-01 21:55:28 -0.9 0
2025-04-01 21:55:28 -0.9 0
2025-04-01 21:55:28 -0.8 0
2025-04-01 21:55:28 -0.8 0
2025-04-01 21:55:28 -0.8 0
2025-04-01 21:55:28 -0.7 0
2025-04-01 21:55:28 -0.7 0
2025-04-01 21:55:28 -0.7 0
2025-04-01 21:55:28 -0.6 0
2025-04-01 21:55:28 -0.6 0
2025-04-01 21:55:28 -0.6 0
2025-04-01 21:55:28
2025-04-01 21:55:28 stdout:
2025-04-01 21:55:28 1234567890 1234567890 1234567890
2025-04-01 21:55:28 -126561577.683753853853498429727072845824
2025-04-01 21:55:28 1234567890 1234567890 1234567890
2025-04-01 21:55:28 12345678 12345678 12345678
2025-04-01 21:55:28 9223372036854775807 9223372036854775807 -9223372036854775807
2025-04-01 21:55:28 9223372036854775800 9223372036854775800 -9223372036854775800
2025-04-01 21:55:28 92233720368547758 92233720368547758 -92233720368547758
2025-04-01 21:55:28 2147483647 2147483647 -2147483647
2025-04-01 21:55:28 2147483647 2147483647 -2147483647
2025-04-01 21:55:28 92233720368547757.99 92233720368547757 -92233720368547757
2025-04-01 21:55:28 2147483640.99 2147483640 -2147483640
2025-04-01 21:55:28 -0.9 0
2025-04-01 21:55:28 -0.9 0
2025-04-01 21:55:28 -0.9 0
2025-04-01 21:55:28 -0.8 0
2025-04-01 21:55:28 -0.8 0
2025-04-01 21:55:28 -0.8 0
2025-04-01 21:55:28 -0.7 0
2025-04-01 21:55:28 -0.7 0
2025-04-01 21:55:28 -0.7 0
2025-04-01 21:55:28 -0.6 0
2025-04-01 21:55:28 -0.6 0
2025-04-01 21:55:28 -0.6 0
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 Settings used in the test: --max_insert_threads 1 --group_by_two_level_threshold 1 --group_by_two_level_threshold_bytes 49584305 --distributed_aggregation_memory_efficient 0 --fsync_metadata 1 --output_format_parallel_formatting 0 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 12287801 --max_read_buffer_size 615990 --prefer_localhost_replica 0 --max_block_size 28087 --max_joined_block_size_rows 24284 --max_threads 3 --optimize_append_index 0 --optimize_if_chain_to_multiif 1 --optimize_if_transform_strings_to_enum 1 --optimize_read_in_order 1 --optimize_or_like_chain 0 --optimize_substitute_columns 0 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 32 --optimize_aggregation_in_order 1 --aggregation_in_order_max_block_bytes 37204966 --use_uncompressed_cache 0 --min_bytes_to_use_direct_io 10737418240 --min_bytes_to_use_mmap_io 1000520577 --local_filesystem_read_method pread --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 0 --filesystem_cache_segments_batch_size 50 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 --throw_on_error_from_cache_on_write_operations 0 --remote_filesystem_read_prefetch 0 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 32Mi --filesystem_prefetches_limit 0 --filesystem_prefetch_min_bytes_for_single_read_task 1Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 100Mi --compile_aggregate_expressions 1 --compile_sort_description 0 --merge_tree_coarse_index_granularity 6 --optimize_distinct_in_order 1 --max_bytes_before_external_sort 10737418240 --max_bytes_before_external_group_by 10737418240 --max_bytes_before_remerge_sort 1186476231 --min_compress_block_size 424601 --max_compress_block_size 2272518 --merge_tree_compact_parts_min_granules_to_multibuffer_read 15 --optimize_sorting_by_input_stream_properties 1 --http_response_buffer_size 9014879 --http_wait_end_of_query True --enable_memory_bound_merging_of_aggregation_results 1 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 0 --session_timezone Atlantic/Azores --use_page_cache_for_disks_without_file_cache True --page_cache_inject_eviction False --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.45 --prefer_external_sort_block_bytes 0 --cross_join_min_rows_to_compress 100000000 --cross_join_min_bytes_to_compress 100000000 --min_external_table_block_size_bytes 100000000 --max_parsing_threads 10 --optimize_functions_to_subcolumns 0 --parallel_replicas_local_plan 0 --query_plan_join_swap_table false --output_format_native_write_json_as_string 0 --enable_vertical_final 0
2025-04-01 21:55:28
2025-04-01 21:55:28 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 0.13824112571391645 --prefer_fetch_merged_part_size_threshold 4542782816 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 100 --allow_vertical_merges_from_compact_to_wide_parts 1 --min_merge_bytes_to_use_direct_io 1 --index_granularity_bytes 2175666 --merge_max_block_size 5020 --index_granularity 52638 --min_bytes_for_wide_part 142963052 --marks_compress_block_size 37481 --primary_key_compress_block_size 67518 --replace_long_file_name_to_hash 0 --max_file_name_length 0 --min_bytes_for_full_part_storage 536870912 --compact_parts_max_bytes_to_buffer 245740836 --compact_parts_max_granules_to_buffer 8 --compact_parts_merge_max_bytes_to_prefetch_part 9292703 --cache_populated_by_fetch 1 --concurrent_part_removal_threshold 0 --old_parts_lifetime 224 --prewarm_mark_cache 0 --use_const_adaptive_granularity 1 --enable_index_granularity_compression 1 --use_primary_key_cache 0 --prewarm_primary_key_cache 0
2025-04-01 21:55:28
2025-04-01 21:55:28 Database: test_zem730f7
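The "Expected server error code '407' but got no server error" message above reflects the 0_stateless test convention: a trailing "-- { serverError NAME }" annotation tells clickhouse-test that the statement must fail with that server error, and DECIMAL_OVERFLOW corresponds to code 407. Here the connection was cut (ATTEMPT_TO_READ_AFTER_EOF) before any error arrived, so the check reports a missing error rather than a wrong one. A minimal annotated statement, copied from the failure report, looks like this:

    -- Casting the negative Decimal128 value to UInt8 is expected to overflow;
    -- clickhouse-test compares the server's actual error against the annotation.
    SELECT toDecimal128('-1', 7) AS x, toUInt8(x); -- { serverError DECIMAL_OVERFLOW }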
2025-04-01 21:55:28 01626_cnf_fuzz_long: [ FAIL ] 116.15 sec.
2025-04-01 21:55:28 Reason: return code: 1
2025-04-01 21:55:28 Traceback (most recent call last):
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 700, in urlopen
2025-04-01 21:55:28 httplib_response = self._make_request(
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 446, in _make_request
2025-04-01 21:55:28 six.raise_from(e, None)
2025-04-01 21:55:28 File "", line 3, in raise_from
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 441, in _make_request
2025-04-01 21:55:28 httplib_response = conn.getresponse()
2025-04-01 21:55:28 File "/usr/lib/python3.10/http/client.py", line 1375, in getresponse
2025-04-01 21:55:28 response.begin()
2025-04-01 21:55:28 File "/usr/lib/python3.10/http/client.py", line 318, in begin
2025-04-01 21:55:28 version, status, reason = self._read_status()
2025-04-01 21:55:28 File "/usr/lib/python3.10/http/client.py", line 287, in _read_status
2025-04-01 21:55:28 raise RemoteDisconnected("Remote end closed connection without"
2025-04-01 21:55:28 http.client.RemoteDisconnected: Remote end closed connection without response
2025-04-01 21:55:28
2025-04-01 21:55:28 During handling of the above exception, another exception occurred:
2025-04-01 21:55:28
2025-04-01 21:55:28 Traceback (most recent call last):
2025-04-01 21:55:28 File "/usr/local/lib/python3.10/dist-packages/requests/adapters.py", line 667, in send
2025-04-01 21:55:28 resp = conn.urlopen(
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 756, in urlopen
2025-04-01 21:55:28 retries = retries.increment(
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/util/retry.py", line 534, in increment
2025-04-01 21:55:28 raise six.reraise(type(error), error, _stacktrace)
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/six.py", line 718, in reraise
2025-04-01 21:55:28 raise value.with_traceback(tb)
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 700, in urlopen
2025-04-01 21:55:28 httplib_response = self._make_request(
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 446, in _make_request
2025-04-01 21:55:28 six.raise_from(e, None)
2025-04-01 21:55:28 File "", line 3, in raise_from
2025-04-01 21:55:28 File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 441, in _make_request
2025-04-01 21:55:28 httplib_response = conn.getresponse()
2025-04-01 21:55:28 File "/usr/lib/python3.10/http/client.py", line 1375, in getresponse
2025-04-01 21:55:28 response.begin()
2025-04-01 21:55:28 File "/usr/lib/python3.10/http/client.py", line 318, in begin
2025-04-01 21:55:28 version, status, reason = self._read_status()
2025-04-01 21:55:28 File "/usr/lib/python3.10/http/client.py", line 287, in _read_status
2025-04-01 21:55:28 raise RemoteDisconnected("Remote end closed connection without"
2025-04-01 21:55:28 urllib3.exceptions.ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
2025-04-01 21:55:28
2025-04-01 21:55:28 During handling of the above exception, another exception occurred:
2025-04-01 21:55:28
2025-04-01 21:55:28 Traceback (most recent call last):
2025-04-01 21:55:28 File "/repo/tests/queries/0_stateless/01626_cnf_fuzz_long.python", line 76, in
2025-04-01 21:55:28 res_cnf = client.query(query_cnf).strip()
2025-04-01 21:55:28 File "/repo/tests/queries/0_stateless/helpers/pure_http_client.py", line 43, in query
2025-04-01 21:55:28 r = requests.post(
2025-04-01 21:55:28 File "/usr/local/lib/python3.10/dist-packages/requests/api.py", line 115, in post
2025-04-01 21:55:28 return request("post", url, data=data, json=json, **kwargs)
2025-04-01 21:55:28 File "/usr/local/lib/python3.10/dist-packages/requests/api.py", line 59, in request
2025-04-01 21:55:28 return session.request(method=method, url=url, **kwargs)
2025-04-01 21:55:28 File "/usr/local/lib/python3.10/dist-packages/requests/sessions.py", line 589, in request
2025-04-01 21:55:28 resp = self.send(prep, **send_kwargs)
2025-04-01 21:55:28 File "/usr/local/lib/python3.10/dist-packages/requests/sessions.py", line 703, in send
2025-04-01 21:55:28 r = adapter.send(request, **kwargs)
2025-04-01 21:55:28 File "/usr/local/lib/python3.10/dist-packages/requests/adapters.py", line 682, in send
2025-04-01 21:55:28 raise ConnectionError(err, request=request)
2025-04-01 21:55:28 requests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
2025-04-01 21:55:28 , result:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 stdout:
2025-04-01 21:55:28
2025-04-01 21:55:28
2025-04-01 21:55:28 Settings used in the test: --max_insert_threads 1 --group_by_two_level_threshold 328542 --group_by_two_level_threshold_bytes 29613909 --distributed_aggregation_memory_efficient 1 --fsync_metadata 0 --output_format_parallel_formatting 0 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 15279041 --max_read_buffer_size 870628 --prefer_localhost_replica 1 --max_block_size 99508 --max_joined_block_size_rows 31150 --max_threads 2 --optimize_append_index 0 --optimize_if_chain_to_multiif 0 --optimize_if_transform_strings_to_enum 1 --optimize_read_in_order 0 --optimize_or_like_chain 1 --optimize_substitute_columns 1 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 66 --optimize_aggregation_in_order 1 --aggregation_in_order_max_block_bytes 33321430 --use_uncompressed_cache 1 --min_bytes_to_use_direct_io 9010623116 --min_bytes_to_use_mmap_io 10737418240 --local_filesystem_read_method io_uring --remote_filesystem_read_method read --local_filesystem_read_prefetch 0 --filesystem_cache_segments_batch_size 5 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 1 --throw_on_error_from_cache_on_write_operations 1 --remote_filesystem_read_prefetch 0 --allow_prefetched_read_pool_for_remote_filesystem 0 --filesystem_prefetch_max_memory_usage 64Mi --filesystem_prefetches_limit 10 --filesystem_prefetch_min_bytes_for_single_read_task 16Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 100Mi --compile_aggregate_expressions 1 --compile_sort_description 1 --merge_tree_coarse_index_granularity 11 --optimize_distinct_in_order 0 --max_bytes_before_external_sort 10737418240 --max_bytes_before_external_group_by 10737418240 --max_bytes_before_remerge_sort 2355874880 --min_compress_block_size 1863030 --max_compress_block_size 473313 --merge_tree_compact_parts_min_granules_to_multibuffer_read 60 --optimize_sorting_by_input_stream_properties 1 --http_response_buffer_size 7343518 --http_wait_end_of_query True --enable_memory_bound_merging_of_aggregation_results 0 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 0 --session_timezone Atlantic/Azores --use_page_cache_for_disks_without_file_cache True --page_cache_inject_eviction True --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.87 --prefer_external_sort_block_bytes 100000000 --cross_join_min_rows_to_compress 0 --cross_join_min_bytes_to_compress 100000000 --min_external_table_block_size_bytes 0 --max_parsing_threads 1 --optimize_functions_to_subcolumns 1 --parallel_replicas_local_plan 1 --query_plan_join_swap_table false --output_format_native_write_json_as_string 0 --enable_vertical_final 0
2025-04-01 21:55:28
2025-04-01 21:55:28 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 0.0 --prefer_fetch_merged_part_size_threshold 10737418240 --vertical_merge_algorithm_min_rows_to_activate 880140 --vertical_merge_algorithm_min_columns_to_activate 1 --allow_vertical_merges_from_compact_to_wide_parts 0 --min_merge_bytes_to_use_direct_io 8509059214 --index_granularity_bytes 20588012 --merge_max_block_size 1577 --index_granularity 24964 --min_bytes_for_wide_part 268537808 --marks_compress_block_size 28183 --primary_key_compress_block_size 71405 --replace_long_file_name_to_hash 0 --max_file_name_length 0 --min_bytes_for_full_part_storage 317122269 --compact_parts_max_bytes_to_buffer 412390039 --compact_parts_max_granules_to_buffer 256 --compact_parts_merge_max_bytes_to_prefetch_part 7230580 --cache_populated_by_fetch 0 --concurrent_part_removal_threshold 95 --old_parts_lifetime 480 --prewarm_mark_cache 0 --use_const_adaptive_granularity 1 --enable_index_granularity_compression 1 --use_primary_key_cache 1 --prewarm_primary_key_cache 0
2025-04-01 21:55:28
2025-04-01 21:55:28 Database: test_v12x85y6
2025-04-01 21:55:28 Process Process-3:
2025-04-01 21:55:28 Traceback (most recent call last):
2025-04-01 21:55:28 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:55:28 self.run()
2025-04-01 21:55:28 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:55:28 self._target(*self._args, **self._kwargs)
2025-04-01 21:55:28 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:55:28 return run_tests_array(*args, **kwargs)
2025-04-01 21:55:28 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:55:28 stop_tests()
2025-04-01 21:55:28 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:55:28 cleanup_child_processes(os.getpid())
2025-04-01 21:55:28 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:55:28 child_pgid = os.getpgid(child)
2025-04-01 21:55:28 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:55:28 Child processes of 1377:
2025-04-01 21:55:28 27976 /bin/sh -c pgrep --parent 1377 -a
2025-04-01 21:55:30 00942_mutate_index: [ FAIL ] 9.17 sec.
2025-04-01 21:55:30 Reason: return code: 210
2025-04-01 21:55:30 Code: 210. DB::NetException: Connection reset by peer, while reading from socket (peer: [::1]:9000, local: [::1]:60346): (localhost:9000, ::1, local address: [::1]:60346). (NETWORK_ERROR)
2025-04-01 21:55:30
2025-04-01 21:55:30 Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
2025-04-01 21:55:30
2025-04-01 21:55:30 Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
2025-04-01 21:55:30
2025-04-01 21:55:30 , result:
2025-04-01 21:55:30
2025-04-01 21:55:30 10
2025-04-01 21:55:30 0
2025-04-01 21:55:30
2025-04-01 21:55:30 stdout:
2025-04-01 21:55:30 10
2025-04-01 21:55:30 0
2025-04-01 21:55:30
2025-04-01 21:55:30
2025-04-01 21:55:30 Settings used in the test: --max_insert_threads 1 --group_by_two_level_threshold 1 --group_by_two_level_threshold_bytes 27400647 --distributed_aggregation_memory_efficient 1 --fsync_metadata 1 --output_format_parallel_formatting 1 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 13292184 --max_read_buffer_size 567702 --prefer_localhost_replica 0 --max_block_size 47246 --max_joined_block_size_rows 84215 --max_threads 3 --optimize_append_index 0 --optimize_if_chain_to_multiif 1 --optimize_if_transform_strings_to_enum 1 --optimize_read_in_order 0 --optimize_or_like_chain 1 --optimize_substitute_columns 1 --enable_multiple_prewhere_read_steps 0 --read_in_order_two_level_merge_threshold 21 --optimize_aggregation_in_order 1 --aggregation_in_order_max_block_bytes 49353075 --use_uncompressed_cache 1 --min_bytes_to_use_direct_io 8906370819 --min_bytes_to_use_mmap_io 1 --local_filesystem_read_method io_uring --remote_filesystem_read_method threadpool --local_filesystem_read_prefetch 1 --filesystem_cache_segments_batch_size 3 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 1 --throw_on_error_from_cache_on_write_operations 1 --remote_filesystem_read_prefetch 1 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 32Mi --filesystem_prefetches_limit 10 --filesystem_prefetch_min_bytes_for_single_read_task 16Mi --filesystem_prefetch_step_marks 0 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 1 --compile_sort_description 0 --merge_tree_coarse_index_granularity 5 --optimize_distinct_in_order 0 --max_bytes_before_external_sort 5657754688 --max_bytes_before_external_group_by 0 --max_bytes_before_remerge_sort 2946336617 --min_compress_block_size 970276 --max_compress_block_size 2083340 --merge_tree_compact_parts_min_granules_to_multibuffer_read 51 --optimize_sorting_by_input_stream_properties 0 --http_response_buffer_size 798115 --http_wait_end_of_query False --enable_memory_bound_merging_of_aggregation_results 0 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 0 --min_count_to_compile_sort_description 3 --session_timezone America/Hermosillo --use_page_cache_for_disks_without_file_cache False --page_cache_inject_eviction False --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.96 --prefer_external_sort_block_bytes 1 --cross_join_min_rows_to_compress 0 --cross_join_min_bytes_to_compress 1 --min_external_table_block_size_bytes 0 --max_parsing_threads 1 --optimize_functions_to_subcolumns 1 --parallel_replicas_local_plan 0 --query_plan_join_swap_table false --output_format_native_write_json_as_string 0 --enable_vertical_final 1
2025-04-01 21:55:30
2025-04-01 21:55:30 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 1.0 --prefer_fetch_merged_part_size_threshold 10359105369 --vertical_merge_algorithm_min_rows_to_activate 1000000 --vertical_merge_algorithm_min_columns_to_activate 1 --allow_vertical_merges_from_compact_to_wide_parts 1 --min_merge_bytes_to_use_direct_io 10173944700 --index_granularity_bytes 23745644 --merge_max_block_size 13542 --index_granularity 61046 --min_bytes_for_wide_part 0 --marks_compress_block_size 53491 --primary_key_compress_block_size 41397 --replace_long_file_name_to_hash 0 --max_file_name_length 121 --min_bytes_for_full_part_storage 536870912 --compact_parts_max_bytes_to_buffer 204053403 --compact_parts_max_granules_to_buffer 18 --compact_parts_merge_max_bytes_to_prefetch_part 19364281 --cache_populated_by_fetch 1 --concurrent_part_removal_threshold 0 --old_parts_lifetime 10 --prewarm_mark_cache 0 --use_const_adaptive_granularity 1 --enable_index_granularity_compression 1 --use_primary_key_cache 0 --prewarm_primary_key_cache 1
2025-04-01 21:55:30
2025-04-01 21:55:30 Database: test_3qsj8ayn
2025-04-01 21:55:38 03034_json_extract_variant: [ FAIL ] 0.00 sec.
2025-04-01 21:55:38 Reason: server died
2025-04-01 21:55:38 Server does not respond to health check
2025-04-01 21:55:38
2025-04-01 21:55:38 Process Process-8:
2025-04-01 21:55:38 Traceback (most recent call last):
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:55:38 self.run()
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:55:38 self._target(*self._args, **self._kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:55:38 return run_tests_array(*args, **kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:55:38 stop_tests()
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:55:38 cleanup_child_processes(os.getpid())
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:55:38 child_pgid = os.getpgid(child)
2025-04-01 21:55:38 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:55:38 Child processes of 1386:
2025-04-01 21:55:38 28025 /bin/sh -c pgrep --parent 1386 -a
2025-04-01 21:55:38 01668_test_toMonth_mysql_dialect: [ FAIL ] 0.00 sec.
2025-04-01 21:55:38 Reason: server died
2025-04-01 21:55:38 Server does not respond to health check
2025-04-01 21:55:38
2025-04-01 21:55:38 Process Process-7:
2025-04-01 21:55:38 02532_analyzer_aggregation_with_rollup: [ FAIL ] 0.00 sec.
2025-04-01 21:55:38 Reason: server died
2025-04-01 21:55:38 Server does not respond to health check
2025-04-01 21:55:38
2025-04-01 21:55:38 Traceback (most recent call last):
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:55:38 self.run()
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:55:38 self._target(*self._args, **self._kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:55:38 return run_tests_array(*args, **kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:55:38 stop_tests()
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:55:38 cleanup_child_processes(os.getpid())
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:55:38 child_pgid = os.getpgid(child)
2025-04-01 21:55:38 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:55:38 Child processes of 1383:
2025-04-01 21:55:38 28031 /bin/sh -c pgrep --parent 1383 -a
2025-04-01 21:55:38 01119_session_log: [ FAIL ] 0.00 sec.
2025-04-01 21:55:38 Reason: server died
2025-04-01 21:55:38 Server does not respond to health check
2025-04-01 21:55:38
2025-04-01 21:55:38 Process Process-10:
2025-04-01 21:55:38 Traceback (most recent call last):
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:55:38 self.run()
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:55:38 self._target(*self._args, **self._kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:55:38 return run_tests_array(*args, **kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:55:38 stop_tests()
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:55:38 cleanup_child_processes(os.getpid())
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:55:38 child_pgid = os.getpgid(child)
2025-04-01 21:55:38 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:55:38 Child processes of 1392:
2025-04-01 21:55:38 28037 /bin/sh -c pgrep --parent 1392 -a
2025-04-01 21:55:38 00717_low_cardinaliry_group_by: [ FAIL ] 0.00 sec.
2025-04-01 21:55:38 Reason: server died
2025-04-01 21:55:38 Server does not respond to health check
2025-04-01 21:55:38
2025-04-01 21:55:38 Process Process-6:
2025-04-01 21:55:38 Traceback (most recent call last):
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:55:38 self.run()
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:55:38 self._target(*self._args, **self._kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:55:38 return run_tests_array(*args, **kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:55:38 stop_tests()
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:55:38 cleanup_child_processes(os.getpid())
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:55:38 child_pgid = os.getpgid(child)
2025-04-01 21:55:38 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:55:38 Child processes of 1381:
2025-04-01 21:55:38 28040 /bin/sh -c pgrep --parent 1381 -a
2025-04-01 21:55:38 Process Process-9:
2025-04-01 21:55:38 Traceback (most recent call last):
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:55:38 self.run()
2025-04-01 21:55:38 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:55:38 self._target(*self._args, **self._kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:55:38 return run_tests_array(*args, **kwargs)
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:55:38 stop_tests()
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:55:38 cleanup_child_processes(os.getpid())
2025-04-01 21:55:38 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:55:38 child_pgid = os.getpgid(child)
2025-04-01 21:55:38 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:55:38 Child processes of 1389:
2025-04-01 21:55:38 28047 /bin/sh -c pgrep --parent 1389 -a
2025-04-01 21:55:40 03149_analyzer_window_redefinition: [ FAIL ] 0.00 sec.
2025-04-01 21:55:40 Reason: server died
2025-04-01 21:55:40 Server does not respond to health check
2025-04-01 21:55:40
2025-04-01 21:55:40 Process Process-5:
2025-04-01 21:55:40 Traceback (most recent call last):
2025-04-01 21:55:40 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:55:40 self.run()
2025-04-01 21:55:40 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:55:40 self._target(*self._args, **self._kwargs)
2025-04-01 21:55:40 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:55:40 return run_tests_array(*args, **kwargs)
2025-04-01 21:55:40 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:55:40 stop_tests()
2025-04-01 21:55:40 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:55:40 cleanup_child_processes(os.getpid())
2025-04-01 21:55:40 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:55:40 child_pgid = os.getpgid(child)
2025-04-01 21:55:40 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:55:40 Child processes of 1379:
2025-04-01 21:55:40 28063 /bin/sh -c pgrep --parent 1379 -a
2025-04-01 21:55:55 02884_authentication_quota: [ FAIL ] 28.19 sec.
2025-04-01 21:55:55 Reason: return code: 210
2025-04-01 21:55:55 Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
2025-04-01 21:55:55
2025-04-01 21:55:55 [the same 'Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)' line repeats 30 more times, once for every remaining connection attempt of the test]
2025-04-01 21:55:55
2025-04-01 21:55:55 , result:
2025-04-01 21:55:55
2025-04-01 21:55:55 > Drop the user, quota, and role if those were created.
2025-04-01 21:55:55 > Create the user with quota with the maximum single authentication attempt.
2025-04-01 21:55:55 > Check if the quota has been created.
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Login to the user account using the wrong password.
2025-04-01 21:55:55 > Quota is exceeded 1 >= 1. Login with correct password should fail.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Alter the quota with MAX FAILED SEQUENTIAL AUTHENTICATIONS = 4
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Successful login should reset the failed authentications counter. Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Login to the user account using the wrong password before exceeding the quota.
2025-04-01 21:55:55 > Also try to login with correct password. Quota should stay exceeded.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Reset the quota by increasing MAX FAILED SEQUENTIAL AUTHENTICATIONS and successful login
2025-04-01 21:55:55 > and check failed_sequential_authentications, max_failed_sequential_authentications.
2025-04-01 21:55:55 ---------------------------------------------------------------------------
2025-04-01 21:55:55 > Create the role with quota with the maximum single authentication attempt.
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Login to the user account using the wrong password.
2025-04-01 21:55:55 > Quota is exceeded 1 >= 1. Login with correct password should fail.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Alter the quota with MAX FAILED SEQUENTIAL AUTHENTICATIONS = 4
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Successful login should reset the failed authentications counter. Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Login to the user account using the wrong password before exceeding the quota.
2025-04-01 21:55:55 > Also try to login with correct password. Quota should stay exceeded.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Reset the quota by increasing MAX FAILED SEQUENTIAL AUTHENTICATIONS and successful login
2025-04-01 21:55:55 > and check failed_sequential_authentications, max_failed_sequential_authentications.
2025-04-01 21:55:55
2025-04-01 21:55:55 stdout:
2025-04-01 21:55:55 > Drop the user, quota, and role if those were created.
2025-04-01 21:55:55 > Create the user with quota with the maximum single authentication attempt.
2025-04-01 21:55:55 > Check if the quota has been created.
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Login to the user account using the wrong password.
2025-04-01 21:55:55 > Quota is exceeded 1 >= 1. Login with correct password should fail.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Alter the quota with MAX FAILED SEQUENTIAL AUTHENTICATIONS = 4
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Successful login should reset the failed authentications counter. Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Login to the user account using the wrong password before exceeding the quota.
2025-04-01 21:55:55 > Also try to login with correct password. Quota should stay exceeded.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Reset the quota by increasing MAX FAILED SEQUENTIAL AUTHENTICATIONS and successful login
2025-04-01 21:55:55 > and check failed_sequential_authentications, max_failed_sequential_authentications.
2025-04-01 21:55:55 ---------------------------------------------------------------------------
2025-04-01 21:55:55 > Create the role with quota with the maximum single authentication attempt.
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Login to the user account using the wrong password.
2025-04-01 21:55:55 > Quota is exceeded 1 >= 1. Login with correct password should fail.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Alter the quota with MAX FAILED SEQUENTIAL AUTHENTICATIONS = 4
2025-04-01 21:55:55 > Try to login to the user account with correct password
2025-04-01 21:55:55 > Successful login should reset the failed authentications counter. Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Login to the user account using the wrong password before exceeding the quota.
2025-04-01 21:55:55 > Also try to login with correct password. Quota should stay exceeded.
2025-04-01 21:55:55 > Check the failed_sequential_authentications, max_failed_sequential_authentications fields.
2025-04-01 21:55:55 > Reset the quota by increasing MAX FAILED SEQUENTIAL AUTHENTICATIONS and successful login
2025-04-01 21:55:55 > and check failed_sequential_authentications, max_failed_sequential_authentications.
2025-04-01 21:55:55
2025-04-01 21:55:55
2025-04-01 21:55:55 Settings used in the test: --max_insert_threads 2 --group_by_two_level_threshold 338922 --group_by_two_level_threshold_bytes 48304547 --distributed_aggregation_memory_efficient 0 --fsync_metadata 0 --output_format_parallel_formatting 1 --input_format_parallel_parsing 0 --min_chunk_bytes_for_parallel_parsing 14259833 --max_read_buffer_size 707319 --prefer_localhost_replica 1 --max_block_size 47702 --max_joined_block_size_rows 31407 --max_threads 1 --optimize_append_index 0 --optimize_if_chain_to_multiif 0 --optimize_if_transform_strings_to_enum 0 --optimize_read_in_order 1 --optimize_or_like_chain 1 --optimize_substitute_columns 1 --enable_multiple_prewhere_read_steps 1 --read_in_order_two_level_merge_threshold 5 --optimize_aggregation_in_order 1 --aggregation_in_order_max_block_bytes 11084502 --use_uncompressed_cache 1 --min_bytes_to_use_direct_io 5474605900 --min_bytes_to_use_mmap_io 9121897274 --local_filesystem_read_method io_uring --remote_filesystem_read_method read --local_filesystem_read_prefetch 0 --filesystem_cache_segments_batch_size 2 --read_from_filesystem_cache_if_exists_otherwise_bypass_cache 0 --throw_on_error_from_cache_on_write_operations 0 --remote_filesystem_read_prefetch 0 --allow_prefetched_read_pool_for_remote_filesystem 1 --filesystem_prefetch_max_memory_usage 64Mi --filesystem_prefetches_limit 10 --filesystem_prefetch_min_bytes_for_single_read_task 16Mi --filesystem_prefetch_step_marks 50 --filesystem_prefetch_step_bytes 0 --compile_aggregate_expressions 1 --compile_sort_description 0 --merge_tree_coarse_index_granularity 5 --optimize_distinct_in_order 0 --max_bytes_before_external_sort 10737418240 --max_bytes_before_external_group_by 0 --max_bytes_before_remerge_sort 2137450295 --min_compress_block_size 2604433 --max_compress_block_size 779737 --merge_tree_compact_parts_min_granules_to_multibuffer_read 15 --optimize_sorting_by_input_stream_properties 1 --http_response_buffer_size 10263932 --http_wait_end_of_query False --enable_memory_bound_merging_of_aggregation_results 1 --min_count_to_compile_expression 3 --min_count_to_compile_aggregate_expression 3 --min_count_to_compile_sort_description 0 --session_timezone Africa/Khartoum --use_page_cache_for_disks_without_file_cache True --page_cache_inject_eviction False --merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability 0.88 --prefer_external_sort_block_bytes 1 --cross_join_min_rows_to_compress 0 --cross_join_min_bytes_to_compress 0 --min_external_table_block_size_bytes 1 --max_parsing_threads 1 --optimize_functions_to_subcolumns 0 --parallel_replicas_local_plan 1 --query_plan_join_swap_table false --output_format_native_write_json_as_string 0 --enable_vertical_final 1
2025-04-01 21:55:55
2025-04-01 21:55:55 MergeTree settings used in test: --ratio_of_defaults_for_sparse_serialization 0.0 --prefer_fetch_merged_part_size_threshold 1 --vertical_merge_algorithm_min_rows_to_activate 1 --vertical_merge_algorithm_min_columns_to_activate 100 --allow_vertical_merges_from_compact_to_wide_parts 1 --min_merge_bytes_to_use_direct_io 6141083109 --index_granularity_bytes 28215756 --merge_max_block_size 19948 --index_granularity 44696 --min_bytes_for_wide_part 0 --marks_compress_block_size 67346 --primary_key_compress_block_size 14378 --replace_long_file_name_to_hash 0 --max_file_name_length 128 --min_bytes_for_full_part_storage 0 --compact_parts_max_bytes_to_buffer 343431166 --compact_parts_max_granules_to_buffer 221 --compact_parts_merge_max_bytes_to_prefetch_part 23808913 --cache_populated_by_fetch 0 --concurrent_part_removal_threshold 0 --old_parts_lifetime 480 --prewarm_mark_cache 0 --use_const_adaptive_granularity 1 --enable_index_granularity_compression 0 --use_primary_key_cache 0 --prewarm_primary_key_cache 1
2025-04-01 21:55:55
2025-04-01 21:55:55 Database: test_2ko0d8au
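The narration in the 02884_authentication_quota failure above describes a quota that caps failed sequential authentications: one wrong password exhausts the limit, a correct password is then also rejected, and the limit is later widened to 4. A minimal Python sketch of that flow is below. It is not the test's own script: the names u_quota_demo, q_quota_demo and the password 'pass' are hypothetical, and it assumes a reachable local clickhouse-client (the CI run above could not connect because the server had died).

```python
#!/usr/bin/env python3
"""Hedged sketch of the quota flow narrated in the failed test above."""
import subprocess

def ch(query, user="default", password="", check=True):
    # Run one statement through clickhouse-client and return the completed process.
    cmd = ["clickhouse-client", "--user", user, "--password", password, "-q", query]
    return subprocess.run(cmd, capture_output=True, text=True, check=check)

ch("DROP USER IF EXISTS u_quota_demo")
ch("DROP QUOTA IF EXISTS q_quota_demo")
ch("CREATE USER u_quota_demo IDENTIFIED WITH plaintext_password BY 'pass'")
ch("CREATE QUOTA q_quota_demo FOR INTERVAL 1 HOUR "
   "MAX failed_sequential_authentications = 1 TO u_quota_demo")

# One wrong password exhausts the quota ...
bad = ch("SELECT 1", user="u_quota_demo", password="wrong", check=False)
# ... so, as the narration says, even the correct password is now rejected.
good = ch("SELECT 1", user="u_quota_demo", password="pass", check=False)
print("after bad login:", bad.returncode, "after good login:", good.returncode)

# Widening the limit (the test uses 4) lets a successful login reset the counter.
ch("ALTER QUOTA q_quota_demo FOR INTERVAL 1 HOUR MAX failed_sequential_authentications = 4")
# The narration checks these two fields; system.quotas_usage is the usual place.
print(ch("SELECT failed_sequential_authentications, max_failed_sequential_authentications "
         "FROM system.quotas_usage WHERE quota_name = 'q_quota_demo'").stdout)
```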
2025-04-01 21:56:05 01908_with_unknown_column: [ FAIL ] 0.00 sec.
2025-04-01 21:56:05 Reason: server died
2025-04-01 21:56:05 Server does not respond to health check
2025-04-01 21:56:05
2025-04-01 21:56:05 Process Process-4:
2025-04-01 21:56:05 Traceback (most recent call last):
2025-04-01 21:56:05 File "/usr/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
2025-04-01 21:56:05 self.run()
2025-04-01 21:56:05 File "/usr/lib/python3.10/multiprocessing/process.py", line 108, in run
2025-04-01 21:56:05 self._target(*self._args, **self._kwargs)
2025-04-01 21:56:05 File "/usr/bin/clickhouse-test", line 2626, in run_tests_process
2025-04-01 21:56:05 return run_tests_array(*args, **kwargs)
2025-04-01 21:56:05 File "/usr/bin/clickhouse-test", line 2407, in run_tests_array
2025-04-01 21:56:05 stop_tests()
2025-04-01 21:56:05 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 21:56:05 cleanup_child_processes(os.getpid())
2025-04-01 21:56:05 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 21:56:05 child_pgid = os.getpgid(child)
2025-04-01 21:56:05 ProcessLookupError: [Errno 3] No such process
2025-04-01 21:56:05 Child processes of 1378:
2025-04-01 21:56:05 28092 /bin/sh -c pgrep --parent 1378 -a
2025-04-01 22:08:29 Hung check failed: [Errno 111] Connection refused
2025-04-01 22:08:29 Server died, terminating all processes...
2025-04-01 22:08:29 Running 139 stateless tests (MainProcess).
2025-04-01 22:08:29 Child processes of 1361:
2025-04-01 22:08:29 1364 python3 /usr/bin/clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check --print-time --jobs 8 --run-by-hash-num 1 --run-by-hash-total 4 --report-logs-stats
2025-04-01 22:08:29 1370 python3 /usr/bin/clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check --print-time --jobs 8 --run-by-hash-num 1 --run-by-hash-total 4 --report-logs-stats
2025-04-01 22:08:29 28100 /bin/sh -c pgrep --parent 1361 -a
2025-04-01 22:08:29 Traceback (most recent call last):
2025-04-01 22:08:29 File "/usr/bin/clickhouse-test", line 3716, in <module>
2025-04-01 22:08:29 main(args)
2025-04-01 22:08:29 File "/usr/bin/clickhouse-test", line 3063, in main
2025-04-01 22:08:29 total_tests_run += do_run_tests(args.jobs, test_suite)
2025-04-01 22:08:29 File "/usr/bin/clickhouse-test", line 2697, in do_run_tests
2025-04-01 22:08:29 run_tests_array(
2025-04-01 22:08:29 File "/usr/bin/clickhouse-test", line 2343, in run_tests_array
2025-04-01 22:08:29 stop_tests()
2025-04-01 22:08:29 File "/usr/bin/clickhouse-test", line 452, in stop_tests
2025-04-01 22:08:29 cleanup_child_processes(os.getpid())
2025-04-01 22:08:29 File "/usr/bin/clickhouse-test", line 440, in cleanup_child_processes
2025-04-01 22:08:29 child_pgid = os.getpgid(child)
2025-04-01 22:08:29 ProcessLookupError: [Errno 3] No such process
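Every traceback above ends the same way: cleanup_child_processes() lists children with pgrep and then calls os.getpgid() on a pid that has already exited (often the pgrep helper shell itself), which raises ProcessLookupError. The Python sketch below is a tolerant variant of that cleanup, not the harness's actual code; it only assumes a procps pgrep that supports --parent, as used in the log above.

```python
import os
import signal
import subprocess

def list_children(pid):
    """Return (pid, cmdline) pairs for direct children of `pid`, via `pgrep --parent -a`."""
    out = subprocess.run(["pgrep", "--parent", str(pid), "-a"],
                         capture_output=True, text=True)
    pairs = []
    for line in out.stdout.splitlines():
        child, _, cmd = line.partition(" ")
        pairs.append((int(child), cmd))
    return pairs

def cleanup_child_processes(pid):
    """Terminate the process groups of our children, ignoring ones that already exited.

    This guards against the race visible in the tracebacks above: a child listed
    by pgrep can be gone by the time getpgid() runs, so ProcessLookupError has to
    mean "nothing to do" rather than abort the whole cleanup.
    """
    for child, cmd in list_children(pid):
        try:
            pgid = os.getpgid(child)
        except ProcessLookupError:
            continue  # the child (often the pgrep shell itself) is already gone
        try:
            os.killpg(pgid, signal.SIGTERM)
        except ProcessLookupError:
            continue

if __name__ == "__main__":
    cleanup_child_processes(os.getpid())
```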
+ set -e
Files in current directory
+ echo 'Files in current directory'
+ ls -la ./
total 127164
drwxr-xr-x 1 root root 4096 Apr 1 21:52 .
drwxr-xr-x 1 root root 4096 Apr 1 21:52 ..
lrwxrwxrwx 1 root root 7 Sep 11 2024 bin -> usr/bin
drwxr-xr-x 2 root root 4096 Apr 18 2022 boot
drwxr-xr-x 14 root root 3840 Apr 1 21:44 dev
-rwxr-xr-x 1 root root 0 Apr 1 21:44 .dockerenv
drwxr-xr-x 1 root root 4096 Apr 1 21:45 etc
drwxr-xr-x 2 root root 4096 Apr 18 2022 home
lrwxrwxrwx 1 root root 7 Sep 11 2024 lib -> usr/lib
lrwxrwxrwx 1 root root 9 Sep 11 2024 lib32 -> usr/lib32
lrwxrwxrwx 1 root root 9 Sep 11 2024 lib64 -> usr/lib64
lrwxrwxrwx 1 root root 10 Sep 11 2024 libx32 -> usr/libx32
-rwxr-xr-x 1 root root 26927256 Jan 15 20:53 mc
drwxr-xr-x 2 root root 4096 Sep 11 2024 media
-rwxr-xr-x 1 root root 103174296 Jan 15 20:53 minio
drwxr-xr-x 4 root root 4096 Apr 1 21:45 minio_data
drwxr-xr-x 2 root root 4096 Sep 11 2024 mnt
drwxr-xr-x 1 root root 4096 Jan 14 20:08 opt
-rw-r--r-- 1 root root 0 Feb 14 2024 .package-cache-mutate
drwxrwxr-x 2 1000 1000 4096 Apr 1 21:44 package_folder
dr-xr-xr-x 305 root root 0 Apr 1 21:44 proc
drwxrwxr-x 17 1000 1000 4096 Apr 1 21:41 repo
-rw-rw-r-- 1 root root 863 Jan 15 20:27 requirements.txt
drwx------ 1 root root 4096 Apr 1 21:47 root
drwxr-xr-x 1 root root 4096 Apr 1 21:45 run
lrwxrwxrwx 1 root root 8 Sep 11 2024 sbin -> usr/sbin
-rw-r--r-- 1 root root 747 Apr 1 21:45 script.gdb
-rwxrwxr-x 1 root root 10851 Jan 14 20:07 setup_export_logs.sh
drwxr-xr-x 2 root root 4096 Sep 11 2024 srv
dr-xr-xr-x 13 root root 0 Apr 1 21:44 sys
drwxrwxr-x 2 1000 1000 4096 Apr 1 21:45 test_output
drwxrwxrwt 1 root root 4096 Apr 1 22:08 tmp
drwxr-xr-x 1 root root 4096 Sep 11 2024 usr
drwxr-xr-x 1 root root 4096 Sep 11 2024 var
Files in root directory
+ echo 'Files in root directory'
+ ls -la /
total 127164
drwxr-xr-x 1 root root 4096 Apr 1 21:52 .
drwxr-xr-x 1 root root 4096 Apr 1 21:52 ..
lrwxrwxrwx 1 root root 7 Sep 11 2024 bin -> usr/bin
drwxr-xr-x 2 root root 4096 Apr 18 2022 boot
drwxr-xr-x 14 root root 3840 Apr 1 21:44 dev
-rwxr-xr-x 1 root root 0 Apr 1 21:44 .dockerenv
drwxr-xr-x 1 root root 4096 Apr 1 21:45 etc
drwxr-xr-x 2 root root 4096 Apr 18 2022 home
lrwxrwxrwx 1 root root 7 Sep 11 2024 lib -> usr/lib
lrwxrwxrwx 1 root root 9 Sep 11 2024 lib32 -> usr/lib32
lrwxrwxrwx 1 root root 9 Sep 11 2024 lib64 -> usr/lib64
lrwxrwxrwx 1 root root 10 Sep 11 2024 libx32 -> usr/libx32
-rwxr-xr-x 1 root root 26927256 Jan 15 20:53 mc
drwxr-xr-x 2 root root 4096 Sep 11 2024 media
-rwxr-xr-x 1 root root 103174296 Jan 15 20:53 minio
drwxr-xr-x 4 root root 4096 Apr 1 21:45 minio_data
drwxr-xr-x 2 root root 4096 Sep 11 2024 mnt
drwxr-xr-x 1 root root 4096 Jan 14 20:08 opt
-rw-r--r-- 1 root root 0 Feb 14 2024 .package-cache-mutate
drwxrwxr-x 2 1000 1000 4096 Apr 1 21:44 package_folder
dr-xr-xr-x 305 root root 0 Apr 1 21:44 proc
drwxrwxr-x 17 1000 1000 4096 Apr 1 21:41 repo
-rw-rw-r-- 1 root root 863 Jan 15 20:27 requirements.txt
drwx------ 1 root root 4096 Apr 1 21:47 root
drwxr-xr-x 1 root root 4096 Apr 1 21:45 run
lrwxrwxrwx 1 root root 8 Sep 11 2024 sbin -> usr/sbin
-rw-r--r-- 1 root root 747 Apr 1 21:45 script.gdb
-rwxrwxr-x 1 root root 10851 Jan 14 20:07 setup_export_logs.sh
drwxr-xr-x 2 root root 4096 Sep 11 2024 srv
dr-xr-xr-x 13 root root 0 Apr 1 21:44 sys
drwxrwxr-x 2 1000 1000 4096 Apr 1 21:45 test_output
drwxrwxrwt 1 root root 4096 Apr 1 22:08 tmp
drwxr-xr-x 1 root root 4096 Sep 11 2024 usr
drwxr-xr-x 1 root root 4096 Sep 11 2024 var
+ clickhouse-client -q 'system flush logs'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
Detach all logs replication
+ :
+ stop_logs_replication
+ echo 'Detach all logs replication'
+ clickhouse-client --query 'select database||'\''.'\''||table from system.tables where database = '\''system'\'' and (table like '\''%_sender'\'' or table like '\''%_watcher'\'')'
+ tee /dev/stderr
+ timeout --preserve-status --signal TERM --kill-after 5m 15m xargs -n1 -r -i clickhouse-client --query 'drop table {}'
xargs: warning: options --max-args and --replace/-I/-i are mutually exclusive, ignoring previous --max-args value
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
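The stop_logs_replication step above selects every `%_sender`/`%_watcher` table that the log-export machinery created in the system database and drops them one by one through xargs (which also warns that -n1 and -i are mutually exclusive); with the server already dead, both the SELECT and the DROPs fail with NETWORK_ERROR. A small Python sketch of the same detach loop, assuming a reachable server:

```python
import subprocess

def ch(query):
    """Run one query through clickhouse-client; raises CalledProcessError if the server is down."""
    return subprocess.run(["clickhouse-client", "--query", query],
                          capture_output=True, text=True, check=True)

def stop_logs_replication():
    # Same selection the trace above performs: every *_sender / *_watcher
    # table in the system database, dropped one at a time.
    rows = ch("SELECT database || '.' || table FROM system.tables "
              "WHERE database = 'system' "
              "AND (table LIKE '%_sender' OR table LIKE '%_watcher')").stdout
    for name in rows.splitlines():
        print("dropping", name)
        ch(f"DROP TABLE {name}")

if __name__ == "__main__":
    stop_logs_replication()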
+ logs_saver_client_options='--max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0'
+ failed_to_save_logs=0
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.query_log into outfile '\''/test_output/query_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.zookeeper_log into outfile '\''/test_output/zookeeper_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.trace_log into outfile '\''/test_output/trace_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.transactions_info_log into outfile '\''/test_output/transactions_info_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.metric_log into outfile '\''/test_output/metric_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.blob_storage_log into outfile '\''/test_output/blob_storage_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.error_log into outfile '\''/test_output/error_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'select * from system.query_metric_log into outfile '\''/test_output/query_metric_log.tsv.zst'\'' format TSVWithNamesAndTypes'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ failed_to_save_logs=1
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
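Each loop iteration above asks the server to dump one system log table straight into a compressed TSV file via INTO OUTFILE; because the server is unreachable every attempt fails and failed_to_save_logs is set. A Python sketch of the same export loop (table list, resource caps, and /test_output paths taken from the trace; assumes a live server):

```python
import subprocess

LOG_TABLES = ["query_log", "zookeeper_log", "trace_log", "transactions_info_log",
              "metric_log", "blob_storage_log", "error_log", "query_metric_log"]

# The resource caps the script passes on every save (logs_saver_client_options above).
CLIENT_OPTS = ["--max_block_size", "8192", "--max_memory_usage", "10G",
               "--max_threads", "1", "--max_result_rows", "0",
               "--max_result_bytes", "0", "--max_bytes_to_read", "0"]

failed_to_save_logs = 0
for table in LOG_TABLES:
    # The .zst suffix makes the client compress the dump itself.
    query = (f"SELECT * FROM system.{table} "
             f"INTO OUTFILE '/test_output/{table}.tsv.zst' "
             f"FORMAT TSVWithNamesAndTypes")
    result = subprocess.run(["clickhouse-client", *CLIENT_OPTS, "-q", query])
    if result.returncode != 0:   # e.g. NETWORK_ERROR when the server is dead
        failed_to_save_logs = 1

print("failed_to_save_logs =", failed_to_save_logs)
```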
+ sleep 1
+ clickhouse-client -q 'SYSTEM FLUSH ASYNC INSERT QUEUE'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ :
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'SELECT log FROM minio_audit_logs ORDER BY log.time INTO OUTFILE '\''/test_output/minio_audit_logs.jsonl.zst'\'' FORMAT JSONEachRow'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ :
+ clickhouse-client --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 -q 'SELECT log FROM minio_server_logs ORDER BY log.time INTO OUTFILE '\''/test_output/minio_server_logs.jsonl.zst'\'' FORMAT JSONEachRow'
Code: 210. DB::NetException: Connection refused (localhost:9000). (NETWORK_ERROR)
+ :
+ sudo clickhouse stop
/var/run/clickhouse-server/clickhouse-server.pid file exists and contains pid = 414.
The process with pid = 414 does not exist.
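`clickhouse stop` above finds a pid file whose pid no longer exists, because the server had already crashed. The Python sketch below shows the liveness check such a stop wrapper performs (signal 0 probes existence without delivering anything); it is not ClickHouse's own implementation, only the same pid-file path from the message above.

```python
import os

PID_FILE = "/var/run/clickhouse-server/clickhouse-server.pid"

def pid_is_alive(pid):
    """Probe a pid with signal 0: no signal is delivered, only the existence check runs."""
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False          # same situation as above: the pid file is stale
    except PermissionError:
        return True           # process exists but belongs to another user
    return True

if __name__ == "__main__":
    with open(PID_FILE) as f:
        pid = int(f.read().strip())
    print(f"pid = {pid}, alive = {pid_is_alive(pid)}")
```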
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ kill 1337
+ rg -Fa '' /var/log/clickhouse-server/clickhouse-server.log
API: SYSTEM.config
Time: 22:08:33 UTC 04/01/2025
DeploymentID: a5e78399-5349-4795-98da-03485aa5dbf0
Error: unable to send webhook log entry to 'minio-http-audit-ch_audit_webhook' err 'http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&date_time_input_format=best_effort&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20JSONAsObject returned 'Post "http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&date_time_input_format=best_effort&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20JSONAsObject": dial tcp 127.0.0.1:8123: connect: connection refused', please check your endpoint configuration' (*fmt.wrapError)
4: internal/logger/logonce.go:64:logger.(*logOnceType).logOnceConsoleIf()
3: internal/logger/logonce.go:157:logger.LogOnceConsoleIf()
2: cmd/logging.go:132:cmd.configLogOnceConsoleIf()
1: internal/logger/target/http/http.go:416:http.(*Target).startQueueProcessor()
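The MinIO error above shows how its audit webhook ships log entries: an HTTP POST to ClickHouse on port 8123 whose query string carries async_insert settings plus `INSERT INTO minio_audit_logs FORMAT JSONAsObject`; it fails here because nothing is listening any more. A Python sketch of that insert path using only urllib; the endpoint parameters are copied from the webhook URL above, while the payload fields are purely illustrative.

```python
import json
import urllib.parse
import urllib.request

# Same endpoint shape as the webhook URL in the error above: server-side
# buffered async inserts, with the INSERT statement in the query string.
params = {
    "async_insert": "1",
    "wait_for_async_insert": "0",
    "async_insert_busy_timeout_min_ms": "5000",
    "async_insert_busy_timeout_max_ms": "5000",
    "date_time_input_format": "best_effort",
    "query": "INSERT INTO minio_audit_logs FORMAT JSONAsObject",
}
url = "http://localhost:8123/?" + urllib.parse.urlencode(params)

# Illustrative audit entry; real MinIO entries carry many more fields.
entry = {"version": "1", "time": "2025-04-01T22:08:33Z", "api": {"name": "SYSTEM.config"}}
req = urllib.request.Request(url, data=json.dumps(entry).encode(), method="POST")
try:
    with urllib.request.urlopen(req, timeout=5) as resp:
        print("inserted, HTTP", resp.status)
except OSError as exc:        # connection refused once the server has died
    print("webhook-style insert failed:", exc)
```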
2025.04.01 21:54:41.649760 [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Logical error: 'Replica info is not initialized'.
2025.04.01 21:54:42.009370 [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Stack trace (when copying this message, always include the lines below):
2025.04.01 21:54:42.013591 [ 26802 ] {} BaseDaemon: ########## Short fault info ############
2025.04.01 21:54:42.014148 [ 26802 ] {} BaseDaemon: (version 24.12.2.20221.altinityantalya (altinity build), build id: E3EBE392F6E30F869D3C8A7C787AA68127B123A4, git hash: 82252d159dc02cab0f366aaa5691adc1545dd11d, architecture: x86_64) (from thread 4059) Received signal 6
2025.04.01 21:54:42.014544 [ 26802 ] {} BaseDaemon: Signal description: Aborted
2025.04.01 21:54:42.014843 [ 26802 ] {} BaseDaemon:
2025.04.01 21:54:42.015115 [ 26802 ] {} BaseDaemon: Stack trace: 0x000056504274ba5e 0x00005650431471b7 0x000056502a871bae 0x00007fab77dfb520 0x00007fab77e4f9fd 0x00007fab77dfb476 0x00007fab77de17f3 0x000056504269d48b 0x000056504269ee61 0x000056502a8991d5 0x000056502a8c0804 0x0000565056af43cd 0x0000565056aed488 0x0000565056af1dc3 0x00005650623e1925 0x00005650616b4571 0x00005650623e0fb0 0x000056506171f8b1 0x00005650616eab14 0x00005650616e88c3 0x00005650616e82f5 0x00005650616e5de5 0x00005650429422ca 0x0000565042954a64 0x00007fab77e4dac3 0x00007fab77edf850
2025.04.01 21:54:42.015418 [ 26802 ] {} BaseDaemon: ########################################
2025.04.01 21:54:42.016232 [ 26802 ] {} BaseDaemon: (version 24.12.2.20221.altinityantalya (altinity build), build id: E3EBE392F6E30F869D3C8A7C787AA68127B123A4, git hash: 82252d159dc02cab0f366aaa5691adc1545dd11d) (from thread 4059) (query_id: 36738c40-c9bd-49ff-8c5d-a6cb75e0448d) (query: INSERT INTO distributed_01099_b SELECT * FROM urlCluster('test_cluster_two_shards', 'http://localhost:8123/?query=select+{1,2,3}+format+TSV', 'TSV', 's String');) Received signal Aborted (6)
2025.04.01 21:54:42.018028 [ 26802 ] {} BaseDaemon:
2025.04.01 21:54:42.018491 [ 26802 ] {} BaseDaemon: Stack trace: 0x000056504274ba5e 0x00005650431471b7 0x000056502a871bae 0x00007fab77dfb520 0x00007fab77e4f9fd 0x00007fab77dfb476 0x00007fab77de17f3 0x000056504269d48b 0x000056504269ee61 0x000056502a8991d5 0x000056502a8c0804 0x0000565056af43cd 0x0000565056aed488 0x0000565056af1dc3 0x00005650623e1925 0x00005650616b4571 0x00005650623e0fb0 0x000056506171f8b1 0x00005650616eab14 0x00005650616e88c3 0x00005650616e82f5 0x00005650616e5de5 0x00005650429422ca 0x0000565042954a64 0x00007fab77e4dac3 0x00007fab77edf850
2025.04.01 21:54:42.342953 [ 26802 ] {} BaseDaemon: 0.0. inlined from ./build_docker/./src/Common/StackTrace.cpp:381: StackTrace::tryCapture()
2025.04.01 21:54:42.343710 [ 26802 ] {} BaseDaemon: 0. ./build_docker/./src/Common/StackTrace.cpp:350: StackTrace::StackTrace(ucontext_t const&) @ 0x000000002077fa5e
2025.04.01 21:54:42.818722 [ 26802 ] {} BaseDaemon: 1. ./build_docker/./src/Common/SignalHandlers.cpp:102: signalHandler(int, siginfo_t*, void*) @ 0x000000002117b1b7
2025.04.01 21:54:46.216005 [ 26802 ] {} BaseDaemon: 2. SignalAction(int, void*, void*) @ 0x00000000088a5bae
2025.04.01 21:54:46.216211 [ 26802 ] {} BaseDaemon: 3. ? @ 0x00007fab77dfb520
2025.04.01 21:54:46.219608 [ 26802 ] {} BaseDaemon: 4. ? @ 0x00007fab77e4f9fd
2025.04.01 21:54:46.219787 [ 26802 ] {} BaseDaemon: 5. ? @ 0x00007fab77dfb476
2025.04.01 21:54:46.219899 [ 26802 ] {} BaseDaemon: 6. ? @ 0x00007fab77de17f3
2025.04.01 21:54:46.819038 [ 26802 ] {} BaseDaemon: 7. ./build_docker/./src/Common/Exception.cpp:48: DB::abortOnFailedAssertion(String const&, void* const*, unsigned long, unsigned long) @ 0x00000000206d148b
2025.04.01 21:54:47.419382 [ 26802 ] {} BaseDaemon: 8.0. inlined from ./build_docker/./src/Common/Exception.cpp:70: DB::handle_error_code(String const&, int, bool, std::vector> const&)
2025.04.01 21:54:47.419544 [ 26802 ] {} BaseDaemon: 8. ./build_docker/./src/Common/Exception.cpp:111: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x00000000206d2e61
2025.04.01 21:54:47.702502 [ 26802 ] {} BaseDaemon: 9. DB::Exception::Exception(PreformattedMessage&&, int) @ 0x00000000088cd1d5
2025.04.01 21:54:47.986515 [ 26802 ] {} BaseDaemon: 10. DB::Exception::Exception<>(int, FormatStringHelperImpl<>) @ 0x00000000088f4804
2025.04.01 21:54:49.350065 [ 26802 ] {} BaseDaemon: 11. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:727: DB::RemoteQueryExecutor::processReadTaskRequest() @ 0x0000000034b283cd
2025.04.01 21:54:50.553569 [ 26802 ] {} BaseDaemon: 12. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:623: DB::RemoteQueryExecutor::processPacket(DB::Packet) @ 0x0000000034b21488
2025.04.01 21:54:51.825748 [ 26802 ] {} BaseDaemon: 13. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:562: DB::RemoteQueryExecutor::readAsync() @ 0x0000000034b25dc3
2025.04.01 21:54:52.214273 [ 26802 ] {} BaseDaemon: 14. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:182: DB::RemoteSource::tryGenerate() @ 0x0000000040415925
2025.04.01 21:54:52.460221 [ 26802 ] {} BaseDaemon: 15. ./build_docker/./src/Processors/ISource.cpp:108: DB::ISource::work() @ 0x000000003f6e8571
2025.04.01 21:54:52.755429 [ 26802 ] {} BaseDaemon: 16. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:134: DB::RemoteSource::work() @ 0x0000000040414fb0
2025.04.01 21:54:52.926172 [ 26802 ] {} BaseDaemon: 17.0. inlined from ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:49: DB::executeJob(DB::ExecutingGraph::Node*, DB::ReadProgressCallback*)
2025.04.01 21:54:52.926340 [ 26802 ] {} BaseDaemon: 17. ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:98: DB::ExecutionThreadContext::executeTask() @ 0x000000003f7538b1
2025.04.01 21:54:53.427953 [ 26802 ] {} BaseDaemon: 18. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:290: DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x000000003f71eb14
2025.04.01 21:54:53.771735 [ 26802 ] {} BaseDaemon: 19.0. inlined from ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:256: DB::PipelineExecutor::executeSingleThread(unsigned long)
2025.04.01 21:54:53.771998 [ 26802 ] {} BaseDaemon: 19. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:442: DB::PipelineExecutor::executeImpl(unsigned long, bool) @ 0x000000003f71c8c3
2025.04.01 21:54:54.124930 [ 26802 ] {} BaseDaemon: 20. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:127: DB::PipelineExecutor::execute(unsigned long, bool) @ 0x000000003f71c2f5
2025.04.01 21:54:54.311733 [ 26802 ] {} BaseDaemon: 21.0. inlined from ./build_docker/./src/Processors/Executors/CompletedPipelineExecutor.cpp:49: DB::threadFunction(DB::CompletedPipelineExecutor::Data&, std::shared_ptr, unsigned long, bool)
2025.04.01 21:54:54.311885 [ 26802 ] {} BaseDaemon: 21.1. inlined from ./build_docker/./src/Processors/Executors/CompletedPipelineExecutor.cpp:89: operator()
2025.04.01 21:54:54.311992 [ 26802 ] {} BaseDaemon: 21.2. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:394: ?
2025.04.01 21:54:54.312152 [ 26802 ] {} BaseDaemon: 21.3. inlined from ./contrib/llvm-project/libcxx/include/tuple:1789: _ZNSt3__118__apply_tuple_implB6v15007IRZN2DB25CompletedPipelineExecutor7executeEvE3$_0RNS_5tupleIJEEETpTnmJEEEDcOT_OT0_NS_15__tuple_indicesIJXspT1_EEEE
2025.04.01 21:54:54.312328 [ 26802 ] {} BaseDaemon: 21.4. inlined from ./contrib/llvm-project/libcxx/include/tuple:1798: decltype(auto) std::apply[abi:v15007]&>(DB::CompletedPipelineExecutor::execute()::$_0&, std::tuple<>&)
2025.04.01 21:54:54.312425 [ 26802 ] {} BaseDaemon: 21.5. inlined from ./src/Common/ThreadPool.h:311: operator()
2025.04.01 21:54:54.312502 [ 26802 ] {} BaseDaemon: 21.6. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:394: ?
2025.04.01 21:54:54.312579 [ 26802 ] {} BaseDaemon: 21.7. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:479: ?
2025.04.01 21:54:54.312669 [ 26802 ] {} BaseDaemon: 21.8. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:235: ?
2025.04.01 21:54:54.312752 [ 26802 ] {} BaseDaemon: 21. ./contrib/llvm-project/libcxx/include/__functional/function.h:716: ? @ 0x000000003f719de5
2025.04.01 21:54:54.589931 [ 26802 ] {} BaseDaemon: 22.0. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:848: ?
2025.04.01 21:54:54.590094 [ 26802 ] {} BaseDaemon: 22.1. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:1197: ?
2025.04.01 21:54:54.590199 [ 26802 ] {} BaseDaemon: 22. ./build_docker/./src/Common/ThreadPool.cpp:785: ThreadPoolImpl::ThreadFromThreadPool::worker() @ 0x00000000209762ca
2025.04.01 21:54:55.076568 [ 26802 ] {} BaseDaemon: 23.0. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:359: ?
2025.04.01 21:54:55.077147 [ 26802 ] {} BaseDaemon: 23.1. inlined from ./contrib/llvm-project/libcxx/include/thread:284: void std::__thread_execute[abi:v15007]>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImpl::ThreadFromThreadPool*, 2ul>(std::tuple>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImpl::ThreadFromThreadPool*>&, std::__tuple_indices<2ul>)
2025.04.01 21:54:55.077380 [ 26802 ] {} BaseDaemon: 23. ./contrib/llvm-project/libcxx/include/thread:295: void* std::__thread_proxy[abi:v15007]>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImpl::ThreadFromThreadPool*>>(void*) @ 0x0000000020988a64
2025.04.01 21:54:55.077615 [ 26802 ] {} BaseDaemon: 24. ? @ 0x00007fab77e4dac3
2025.04.01 21:54:55.077747 [ 26802 ] {} BaseDaemon: 25. ? @ 0x00007fab77edf850
2025.04.01 21:55:05.231487 [ 26802 ] {} BaseDaemon: Integrity check of the executable successfully passed (checksum: 003D0F8422331117266F6835F6AB47FF)
2025.04.01 21:55:14.873946 [ 26802 ] {} BaseDaemon: This ClickHouse version is not official and should be upgraded to the official build.
2025.04.01 21:55:14.875484 [ 26802 ] {} BaseDaemon: Changed settings: min_compress_block_size = 2676963, max_compress_block_size = 1361695, max_block_size = 23650, min_external_table_block_size_bytes = 100000000, max_joined_block_size_rows = 26715, max_insert_threads = 1, max_threads = 2, max_parsing_threads = 1, max_read_buffer_size = 971032, connect_timeout_with_failover_ms = 2000, connect_timeout_with_failover_secure_ms = 3000, idle_connection_timeout = 36000, s3_max_get_rps = 1000000, s3_max_get_burst = 2000000, s3_max_put_rps = 1000000, s3_max_put_burst = 2000000, s3_check_objects_after_upload = true, use_uncompressed_cache = true, max_remote_read_network_bandwidth = 1000000000000, max_remote_write_network_bandwidth = 1000000000000, max_local_read_bandwidth = 1000000000000, max_local_write_bandwidth = 1000000000000, stream_like_engine_allow_direct_select = true, enable_multiple_prewhere_read_steps = false, replication_wait_for_inactive_replica_timeout = 30, min_count_to_compile_expression = 0, min_count_to_compile_aggregate_expression = 0, compile_sort_description = false, group_by_two_level_threshold = 1000000, group_by_two_level_threshold_bytes = 1, enable_memory_bound_merging_of_aggregation_results = false, allow_nonconst_timezone_arguments = true, parallel_distributed_insert_select = 1, min_chunk_bytes_for_parallel_parsing = 6692138, merge_tree_coarse_index_granularity = 28, min_bytes_to_use_direct_io = 10737418240, min_bytes_to_use_mmap_io = 8983766593, log_queries = true, insert_quorum_timeout = 60000, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.9200000166893005, http_response_buffer_size = 3906084, fsync_metadata = true, query_plan_join_swap_table = true, http_send_timeout = 60., http_receive_timeout = 60., use_index_for_in_with_subqueries_max_values = 1000000000, opentelemetry_start_trace_probability = 0.10000000149011612, enable_vertical_final = false, max_rows_to_read = 20000000, max_bytes_to_read = 1000000000000, max_bytes_to_read_leaf = 1000000000000, max_rows_to_group_by = 10000000000, max_bytes_before_external_group_by = 0, max_rows_to_sort = 10000000000, max_bytes_to_sort = 10000000000, prefer_external_sort_block_bytes = 1, max_bytes_before_external_sort = 10737418240, max_bytes_before_remerge_sort = 2611551233, max_result_rows = 1000000000, max_result_bytes = 1000000000, max_execution_time = 600., max_execution_time_leaf = 600., max_execution_speed = 100000000000, max_execution_speed_bytes = 10000000000000, timeout_before_checking_execution_speed = 300., max_estimated_execution_time = 600., max_columns_to_read = 20000, max_temporary_columns = 20000, max_temporary_non_const_columns = 20000, max_rows_in_set = 10000000000, max_bytes_in_set = 10000000000, max_rows_in_join = 10000000000, max_bytes_in_join = 10000000000, cross_join_min_rows_to_compress = 100000000, cross_join_min_bytes_to_compress = 0, max_rows_to_transfer = 1000000000, max_bytes_to_transfer = 1000000000, max_rows_in_distinct = 10000000000, max_bytes_in_distinct = 10000000000, max_memory_usage = 5000000000, max_memory_usage_for_user = 32000000000, max_untracked_memory = 1048576, memory_profiler_step = 1048576, max_network_bandwidth = 100000000000, max_network_bytes = 1000000000000, max_network_bandwidth_for_user = 100000000000, max_network_bandwidth_for_all_users = 100000000000, max_temporary_data_on_disk_size_for_user = 100000000000, max_temporary_data_on_disk_size_for_query = 100000000000, max_backup_bandwidth = 100000000000, log_comment = 
'01099_parallel_distributed_insert_select.sql', send_logs_level = 'error', prefer_localhost_replica = false, optimize_read_in_order = false, aggregation_in_order_max_block_bytes = 20435763, read_in_order_two_level_merge_threshold = 93, max_hyperscan_regexp_length = 1000000, max_hyperscan_regexp_total_length = 10000000, allow_introspection_functions = true, database_atomic_wait_for_drop_and_detach_synchronously = true, optimize_if_transform_strings_to_enum = true, optimize_substitute_columns = true, query_cache_max_size_in_bytes = 10000000, query_cache_max_entries = 100000, distributed_ddl_entry_format_version = 6, external_storage_max_read_rows = 10000000000, external_storage_max_read_bytes = 10000000000, local_filesystem_read_method = 'read', local_filesystem_read_prefetch = true, merge_tree_min_bytes_per_task_for_remote_reading = 1048576, merge_tree_compact_parts_min_granules_to_multibuffer_read = 31, async_insert_busy_timeout_max_ms = 5000, enable_filesystem_cache = true, enable_filesystem_cache_on_write_operations = true, read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true, throw_on_error_from_cache_on_write_operations = true, filesystem_cache_segments_batch_size = 3, use_page_cache_for_disks_without_file_cache = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, allow_prefetched_read_pool_for_local_filesystem = false, filesystem_prefetch_step_bytes = 104857600, filesystem_prefetch_step_marks = 50, filesystem_prefetch_max_memory_usage = 67108864, filesystem_prefetches_limit = 10, max_streams_for_merge_tree_reading = 1000, insert_keeper_max_retries = 100, insert_keeper_retry_initial_backoff_ms = 1, insert_keeper_retry_max_backoff_ms = 10, insert_keeper_fault_injection_probability = 0.009999999776482582, allow_experimental_parallel_reading_from_replicas = 0, parallel_replicas_local_plan = false, session_timezone = 'Mexico/BajaSur'
2025.04.01 21:55:18.725406 [ 410 ] {} Application: Child process was terminated by signal 6.
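The crash dump above records both the failure (Logical error: 'Replica info is not initialized' in RemoteQueryExecutor::processReadTaskRequest, followed by SIGABRT) and the exact statement that triggered it, the urlCluster INSERT from test 01099. A hedged Python driver that replays only that statement against a disposable test server is sketched below; it assumes the 01099 objects (distributed_01099_b, test_cluster_two_shards) already exist there, and the query text is copied verbatim from the log.

```python
import subprocess

# Statement copied verbatim from the fatal log entry above.
QUERY = (
    "INSERT INTO distributed_01099_b SELECT * FROM urlCluster("
    "'test_cluster_two_shards', "
    "'http://localhost:8123/?query=select+{1,2,3}+format+TSV', "
    "'TSV', 's String');"
)

result = subprocess.run(
    ["clickhouse-client", "--send_logs_level", "error", "-q", QUERY],
    capture_output=True, text=True,
)
print("return code:", result.returncode)
print(result.stderr or result.stdout)
```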
+ rg -A50 -Fa ============ /var/log/clickhouse-server/stderr.log
+ :
+ data_path_config=--path=/var/lib/clickhouse/
+ zstd --threads=0
+ [[ -n '' ]]
+ '[' 1 -ne 0 ']'
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.query_log format TSVWithNamesAndTypes'
+ zstd --threads=0
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ zstd --threads=0
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.zookeeper_log format TSVWithNamesAndTypes'
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.trace_log format TSVWithNamesAndTypes'
+ zstd --threads=0
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.transactions_info_log format TSVWithNamesAndTypes'
+ zstd --threads=0
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.metric_log format TSVWithNamesAndTypes'
+ zstd --threads=0
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.blob_storage_log format TSVWithNamesAndTypes'
+ zstd --threads=0
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ zstd --threads=0
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.error_log format TSVWithNamesAndTypes'
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log query_metric_log
+ clickhouse-local --max_block_size 8192 --max_memory_usage 10G --max_threads 1 --max_result_rows 0 --max_result_bytes 0 --max_bytes_to_read 0 --path=/var/lib/clickhouse/ --only-system-tables --stacktrace -q 'select * from system.query_metric_log format TSVWithNamesAndTypes'
+ zstd --threads=0
+ [[ 0 -eq 1 ]]
+ [[ 0 -eq 1 ]]
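Because failed_to_save_logs was set, the script falls back to clickhouse-local above: it opens the dead server's data directory directly (--path=/var/lib/clickhouse/ --only-system-tables), dumps each system log table, and pipes the output through zstd. A Python sketch of one such offline dump, using the same paths as the trace:

```python
import subprocess

TABLES = ["query_log", "zookeeper_log", "trace_log", "transactions_info_log",
          "metric_log", "blob_storage_log", "error_log", "query_metric_log"]

for table in TABLES:
    query = f"SELECT * FROM system.{table} FORMAT TSVWithNamesAndTypes"
    with open(f"/test_output/{table}.tsv.zst", "wb") as out:
        # clickhouse-local reads the data directory itself, so no running
        # server is needed; zstd compresses the stream on the way out.
        local = subprocess.Popen(
            ["clickhouse-local", "--path=/var/lib/clickhouse/",
             "--only-system-tables", "-q", query],
            stdout=subprocess.PIPE)
        zstd = subprocess.Popen(["zstd", "--threads=0"], stdin=local.stdout, stdout=out)
        local.stdout.close()   # let zstd see EOF when clickhouse-local exits
        zstd.wait()
        local.wait()
```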
+ for trace_type in CPU Memory Real
+ zstd --threads=0
+ clickhouse-local --path=/var/lib/clickhouse/ --only-system-tables -q '
select
arrayStringConcat((arrayMap(x -> concat(splitByChar('\''/'\'', addressToLine(x))[-1], '\''#'\'', demangle(addressToSymbol(x)) ), trace)), '\'';'\'') AS stack,
count(*) AS samples
from system.trace_log
where trace_type = '\''CPU'\''
group by trace
order by samples desc
settings allow_introspection_functions = 1
format TabSeparated'
+ for trace_type in CPU Memory Real
+ zstd --threads=0
+ clickhouse-local --path=/var/lib/clickhouse/ --only-system-tables -q '
select
arrayStringConcat((arrayMap(x -> concat(splitByChar('\''/'\'', addressToLine(x))[-1], '\''#'\'', demangle(addressToSymbol(x)) ), trace)), '\'';'\'') AS stack,
count(*) AS samples
from system.trace_log
where trace_type = '\''Memory'\''
group by trace
order by samples desc
settings allow_introspection_functions = 1
format TabSeparated'
+ for trace_type in CPU Memory Real
+ clickhouse-local --path=/var/lib/clickhouse/ --only-system-tables -q '
select
arrayStringConcat((arrayMap(x -> concat(splitByChar('\''/'\'', addressToLine(x))[-1], '\''#'\'', demangle(addressToSymbol(x)) ), trace)), '\'';'\'') AS stack,
count(*) AS samples
from system.trace_log
where trace_type = '\''Real'\''
group by trace
order by samples desc
settings allow_introspection_functions = 1
format TabSeparated'
+ zstd --threads=0
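The three queries above collapse system.trace_log samples into flamegraph-style stacks: each address is symbolized with addressToLine/addressToSymbol/demangle, frames are joined with ';', and samples are counted per unique stack, once per trace_type (CPU, Memory, Real). A Python sketch that builds and runs the same query offline is below; the output file names are hypothetical, the SQL mirrors the trace.

```python
import subprocess

# Mirrors the query above: symbolize each frame, join with ';', count samples.
# allow_introspection_functions is required for addressToLine / addressToSymbol / demangle.
COLLAPSE_SQL = """
SELECT
    arrayStringConcat(
        arrayMap(x -> concat(splitByChar('/', addressToLine(x))[-1], '#',
                             demangle(addressToSymbol(x))), trace), ';') AS stack,
    count(*) AS samples
FROM system.trace_log
WHERE trace_type = '{trace_type}'
GROUP BY trace
ORDER BY samples DESC
SETTINGS allow_introspection_functions = 1
FORMAT TabSeparated
"""

for trace_type in ("CPU", "Memory", "Real"):
    with open(f"/test_output/flamegraph_{trace_type.lower()}.tsv", "w") as out:
        subprocess.run(
            ["clickhouse-local", "--path=/var/lib/clickhouse/",
             "--only-system-tables", "-q",
             COLLAPSE_SQL.format(trace_type=trace_type)],
            stdout=out, check=False)
```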
+ check_logs_for_critical_errors
+ sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log
+ rg -Fav -e 'ASan doesn'\''t fully support makecontext/swapcontext functions' -e DB::Exception /test_output/tmp
+ echo -e 'No sanitizer asserts\tOK\t\N\t'
+ rm -f /test_output/tmp
+ rg -Fa ' Application: Child process was terminated by signal 9' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.log
+ echo -e 'No OOM messages in clickhouse-server.log\tOK\t\N\t'
+ rg -Fa 'Code: 49. DB::Exception: ' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.log
+ echo -e 'No logical errors\tOK\t\N\t'
+ '[' -s /test_output/logical_errors.txt ']'
+ rm /test_output/logical_errors.txt
+ rg --text 'Code: 499.*The specified key does not exist' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.log
+ grep -v -e a.myext -e DistributedCacheTCPHandler -e ReadBufferFromDistributedCache -e ReadBufferFromS3 -e ReadBufferFromAzureBlobStorage -e AsynchronousBoundedReadBuffer -e 'caller id: None:DistribCache'
+ echo -e 'No lost s3 keys\tOK\t\N\t'
+ rg -Fa 'it is lost forever' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.log
+ grep SharedMergeTreePartCheckThread
+ echo -e 'No SharedMergeTree lost forever in clickhouse-server.log\tOK\t\N\t'
+ '[' -s /test_output/no_such_key_errors.txt ']'
+ rm /test_output/no_such_key_errors.txt
+ rg -Fa '#######################################' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.log
+ echo -e 'Killed by signal (in clickhouse-server.log)\tFAIL\t\N\t'
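check_logs_for_critical_errors above sweeps the server logs with a fixed set of patterns (sanitizer reports, "terminated by signal 9" OOM kills, "Code: 49" logical errors, lost S3 keys, the '#' crash banner) and emits one tab-separated OK/FAIL verdict per check. The Python sketch below is a simplified version of that sweep: the patterns come from the trace above, but the verdict wording is condensed rather than copied from the script.

```python
import re
from pathlib import Path

LOGS = [Path("/var/log/clickhouse-server/clickhouse-server.err.log"),
        Path("/var/log/clickhouse-server/clickhouse-server.log")]

# (check name, pattern whose presence means FAIL)
CHECKS = [
    ("OOM kill (Child process was terminated by signal 9)",
     re.compile(r"Application: Child process was terminated by signal 9")),
    ("logical error (Code: 49. DB::Exception)",
     re.compile(r"Code: 49\. DB::Exception: ")),
    ("lost S3 key (Code: 499, key does not exist)",
     re.compile(r"Code: 499.*The specified key does not exist")),
    ("crash banner (run of '#' printed by BaseDaemon)",
     re.compile(r"#{30,}")),
]

def sweep():
    text = "\n".join(p.read_text(errors="replace") for p in LOGS if p.exists())
    for name, pattern in CHECKS:
        verdict = "FAIL" if pattern.search(text) else "OK"
        # Same tab-separated report shape as the echo lines above.
        print(f"{name}\t{verdict}\t\\N\t")

if __name__ == "__main__":
    sweep()
```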
+ rg -Fa ' <Fatal> ' /var/log/clickhouse-server/clickhouse-server.err.log /var/log/clickhouse-server/clickhouse-server.log
++ trim_server_logs fatal_messages.txt
++ head -n 100 /test_output/fatal_messages.txt
++ grep -Eo ' \[ [0-9]+ \] \{.*'
++ escaped
++ clickhouse local -S 's String' --input-format=LineAsString -q 'select substr(s, 1, 300)
from table format CustomSeparated settings format_custom_row_after_delimiter='\''\\\\n'\'''
+ echo -e 'Fatal message in clickhouse-server.log (see fatal_messages.txt)\tFAIL\t\N\t [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Logical error: \'\''Replica info is not initialized\'\''.\\n [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Stack trace (when copying this message, always include the lines below):\\n [ 26802 ] {} BaseDaemon: ########## Short fault info ############\\n [ 26802 ] {} BaseDaemon: (version 24.12.2.20221.altinityantalya (altinity build), build id: E3EBE392F6E30F869D3C8A7C787AA68127B123A4, git hash: 82252d159dc02cab0f366aaa5691adc1545dd11d, architecture: x86_64) (from thread 4059) Received signal 6\\n [ 26802 ] {} BaseDaemon: Signal description: Aborted\\n [ 26802 ] {} BaseDaemon: \\n [ 26802 ] {} BaseDaemon: Stack trace: 0x000056504274ba5e 0x00005650431471b7 0x000056502a871bae 0x00007fab77dfb520 0x00007fab77e4f9fd 0x00007fab77dfb476 0x00007fab77de17f3 0x000056504269d48b 0x000056504269ee61 0x000056502a8991d5 0x000056502a8c0804 0x0000565056af43cd 0x0000565056aed488 0x0000\\n [ 26802 ] {} BaseDaemon: ########################################\\n [ 26802 ] {} BaseDaemon: (version 24.12.2.20221.altinityantalya (altinity build), build id: E3EBE392F6E30F869D3C8A7C787AA68127B123A4, git hash: 82252d159dc02cab0f366aaa5691adc1545dd11d) (from thread 4059) (query_id: 36738c40-c9bd-49ff-8c5d-a6cb75e0448d) (query: INSERT INTO distributed_01099\\n [ 26802 ] {} BaseDaemon: \\n [ 26802 ] {} BaseDaemon: Stack trace: 0x000056504274ba5e 0x00005650431471b7 0x000056502a871bae 0x00007fab77dfb520 0x00007fab77e4f9fd 0x00007fab77dfb476 0x00007fab77de17f3 0x000056504269d48b 0x000056504269ee61 0x000056502a8991d5 0x000056502a8c0804 0x0000565056af43cd 0x0000565056aed488 0x0000\\n [ 26802 ] {} BaseDaemon: 0.0. inlined from ./build_docker/./src/Common/StackTrace.cpp:381: StackTrace::tryCapture()\\n [ 26802 ] {} BaseDaemon: 0. ./build_docker/./src/Common/StackTrace.cpp:350: StackTrace::StackTrace(ucontext_t const&) @ 0x000000002077fa5e\\n [ 26802 ] {} BaseDaemon: 1. ./build_docker/./src/Common/SignalHandlers.cpp:102: signalHandler(int, siginfo_t*, void*) @ 0x000000002117b1b7\\n [ 26802 ] {} BaseDaemon: 2. SignalAction(int, void*, void*) @ 0x00000000088a5bae\\n [ 26802 ] {} BaseDaemon: 3. ? @ 0x00007fab77dfb520\\n [ 26802 ] {} BaseDaemon: 4. ? @ 0x00007fab77e4f9fd\\n [ 26802 ] {} BaseDaemon: 5. ? @ 0x00007fab77dfb476\\n [ 26802 ] {} BaseDaemon: 6. ? @ 0x00007fab77de17f3\\n [ 26802 ] {} BaseDaemon: 7. ./build_docker/./src/Common/Exception.cpp:48: DB::abortOnFailedAssertion(String const&, void* const*, unsigned long, unsigned long) @ 0x00000000206d148b\\n [ 26802 ] {} BaseDaemon: 8.0. inlined from ./build_docker/./src/Common/Exception.cpp:70: DB::handle_error_code(String const&, int, bool, std::vector> const&)\\n [ 26802 ] {} BaseDaemon: 8. ./build_docker/./src/Common/Exception.cpp:111: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x00000000206d2e61\\n [ 26802 ] {} BaseDaemon: 9. DB::Exception::Exception(PreformattedMessage&&, int) @ 0x00000000088cd1d5\\n [ 26802 ] {} BaseDaemon: 10. DB::Exception::Exception<>(int, FormatStringHelperImpl<>) @ 0x00000000088f4804\\n [ 26802 ] {} BaseDaemon: 11. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:727: DB::RemoteQueryExecutor::processReadTaskRequest() @ 0x0000000034b283cd\\n [ 26802 ] {} BaseDaemon: 12. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:623: DB::RemoteQueryExecutor::processPacket(DB::Packet) @ 0x0000000034b21488\\n [ 26802 ] {} BaseDaemon: 13. 
./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:562: DB::RemoteQueryExecutor::readAsync() @ 0x0000000034b25dc3\\n [ 26802 ] {} BaseDaemon: 14. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:182: DB::RemoteSource::tryGenerate() @ 0x0000000040415925\\n [ 26802 ] {} BaseDaemon: 15. ./build_docker/./src/Processors/ISource.cpp:108: DB::ISource::work() @ 0x000000003f6e8571\\n [ 26802 ] {} BaseDaemon: 16. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:134: DB::RemoteSource::work() @ 0x0000000040414fb0\\n [ 26802 ] {} BaseDaemon: 17.0. inlined from ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:49: DB::executeJob(DB::ExecutingGraph::Node*, DB::ReadProgressCallback*)\\n [ 26802 ] {} BaseDaemon: 17. ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:98: DB::ExecutionThreadContext::executeTask() @ 0x000000003f7538b1\\n [ 26802 ] {} BaseDaemon: 18. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:290: DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x000000003f71eb14\\n [ 26802 ] {} BaseDaemon: 19.0. inlined from ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:256: DB::PipelineExecutor::executeSingleThread(unsigned long)\\n [ 26802 ] {} BaseDaemon: 19. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:442: DB::PipelineExecutor::executeImpl(unsigned long, bool) @ 0x000000003f71c8c3\\n [ 26802 ] {} BaseDaemon: 20. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:127: DB::PipelineExecutor::execute(unsigned long, bool) @ 0x000000003f71c2f5\\n [ 26802 ] {} BaseDaemon: 21.0. inlined from ./build_docker/./src/Processors/Executors/CompletedPipelineExecutor.cpp:49: DB::threadFunction(DB::CompletedPipelineExecutor::Data&, std::shared_ptr, unsigned long, bool)\\n [ 26802 ] {} BaseDaemon: 21.1. inlined from ./build_docker/./src/Processors/Executors/CompletedPipelineExecutor.cpp:89: operator()\\n [ 26802 ] {} BaseDaemon: 21.2. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:394: ?\\n [ 26802 ] {} BaseDaemon: 21.3. inlined from ./contrib/llvm-project/libcxx/include/tuple:1789: _ZNSt3__118__apply_tuple_implB6v15007IRZN2DB25CompletedPipelineExecutor7executeEvE3$_0RNS_5tupleIJEEETpTnmJEEEDcOT_OT0_NS_15__tuple_indicesIJXspT1_EEEE\\n [ 26802 ] {} BaseDaemon: 21.4. inlined from ./contrib/llvm-project/libcxx/include/tuple:1798: decltype(auto) std::apply[abi:v15007]&>(DB::CompletedPipelineExecutor::execute()::$_0&, std::tuple<>&)\\n [ 26802 ] {} BaseDaemon: 21.5. inlined from ./src/Common/ThreadPool.h:311: operator()\\n [ 26802 ] {} BaseDaemon: 21.6. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:394: ?\\n [ 26802 ] {} BaseDaemon: 21.7. inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:479: ?\\n [ 26802 ] {} BaseDaemon: 21.8. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:235: ?\\n [ 26802 ] {} BaseDaemon: 21. ./contrib/llvm-project/libcxx/include/__functional/function.h:716: ? @ 0x000000003f719de5\\n [ 26802 ] {} BaseDaemon: 22.0. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:848: ?\\n [ 26802 ] {} BaseDaemon: 22.1. inlined from ./contrib/llvm-project/libcxx/include/__functional/function.h:1197: ?\\n [ 26802 ] {} BaseDaemon: 22. ./build_docker/./src/Common/ThreadPool.cpp:785: ThreadPoolImpl::ThreadFromThreadPool::worker() @ 0x00000000209762ca\\n [ 26802 ] {} BaseDaemon: 23.0. 
inlined from ./contrib/llvm-project/libcxx/include/__functional/invoke.h:359: ?\\n [ 26802 ] {} BaseDaemon: 23.1. inlined from ./contrib/llvm-project/libcxx/include/thread:284: void std::__thread_execute[abi:v15007]>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImp\\n [ 26802 ] {} BaseDaemon: 23. ./contrib/llvm-project/libcxx/include/thread:295: void* std::__thread_proxy[abi:v15007]>, void (ThreadPoolImpl::ThreadFromThreadPool::*)(), ThreadPoolImpl BaseDaemon: 24. ? @ 0x00007fab77e4dac3\\n [ 26802 ] {} BaseDaemon: 25. ? @ 0x00007fab77edf850\\n [ 26802 ] {} BaseDaemon: Integrity check of the executable successfully passed (checksum: 003D0F8422331117266F6835F6AB47FF)\\n [ 26802 ] {} BaseDaemon: This ClickHouse version is not official and should be upgraded to the official build.\\n [ 26802 ] {} BaseDaemon: Changed settings: min_compress_block_size = 2676963, max_compress_block_size = 1361695, max_block_size = 23650, min_external_table_block_size_bytes = 100000000, max_joined_block_size_rows = 26715, max_insert_threads = 1, max_threads = 2, max_parsing_threads = 1, max\\n [ 410 ] {} Application: Child process was terminated by signal 6.\\n [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Logical error: \'\''Replica info is not initialized\'\''.\\n [ 4059 ] {36738c40-c9bd-49ff-8c5d-a6cb75e0448d} : Stack trace (when copying this message, always include the lines below):\\n [ 26802 ] {} BaseDaemon: ########## Short fault info ############\\n [ 26802 ] {} BaseDaemon: (version 24.12.2.20221.altinityantalya (altinity build), build id: E3EBE392F6E30F869D3C8A7C787AA68127B123A4, git hash: 82252d159dc02cab0f366aaa5691adc1545dd11d, architecture: x86_64) (from thread 4059) Received signal 6\\n [ 26802 ] {} BaseDaemon: Signal description: Aborted\\n [ 26802 ] {} BaseDaemon: \\n [ 26802 ] {} BaseDaemon: Stack trace: 0x000056504274ba5e 0x00005650431471b7 0x000056502a871bae 0x00007fab77dfb520 0x00007fab77e4f9fd 0x00007fab77dfb476 0x00007fab77de17f3 0x000056504269d48b 0x000056504269ee61 0x000056502a8991d5 0x000056502a8c0804 0x0000565056af43cd 0x0000565056aed488 0x0000\\n [ 26802 ] {} BaseDaemon: ########################################\\n [ 26802 ] {} BaseDaemon: (version 24.12.2.20221.altinityantalya (altinity build), build id: E3EBE392F6E30F869D3C8A7C787AA68127B123A4, git hash: 82252d159dc02cab0f366aaa5691adc1545dd11d) (from thread 4059) (query_id: 36738c40-c9bd-49ff-8c5d-a6cb75e0448d) (query: INSERT INTO distributed_01099\\n [ 26802 ] {} BaseDaemon: \\n [ 26802 ] {} BaseDaemon: Stack trace: 0x000056504274ba5e 0x00005650431471b7 0x000056502a871bae 0x00007fab77dfb520 0x00007fab77e4f9fd 0x00007fab77dfb476 0x00007fab77de17f3 0x000056504269d48b 0x000056504269ee61 0x000056502a8991d5 0x000056502a8c0804 0x0000565056af43cd 0x0000565056aed488 0x0000\\n [ 26802 ] {} BaseDaemon: 0.0. inlined from ./build_docker/./src/Common/StackTrace.cpp:381: StackTrace::tryCapture()\\n [ 26802 ] {} BaseDaemon: 0. ./build_docker/./src/Common/StackTrace.cpp:350: StackTrace::StackTrace(ucontext_t const&) @ 0x000000002077fa5e\\n [ 26802 ] {} BaseDaemon: 1. ./build_docker/./src/Common/SignalHandlers.cpp:102: signalHandler(int, siginfo_t*, void*) @ 0x000000002117b1b7\\n [ 26802 ] {} BaseDaemon: 2. SignalAction(int, void*, void*) @ 0x00000000088a5bae\\n [ 26802 ] {} BaseDaemon: 3. ? @ 0x00007fab77dfb520\\n [ 26802 ] {} BaseDaemon: 4. ? @ 0x00007fab77e4f9fd\\n [ 26802 ] {} BaseDaemon: 5. ? @ 0x00007fab77dfb476\\n [ 26802 ] {} BaseDaemon: 6. ? @ 0x00007fab77de17f3\\n [ 26802 ] {} BaseDaemon: 7. 
./build_docker/./src/Common/Exception.cpp:48: DB::abortOnFailedAssertion(String const&, void* const*, unsigned long, unsigned long) @ 0x00000000206d148b\\n [ 26802 ] {} BaseDaemon: 8.0. inlined from ./build_docker/./src/Common/Exception.cpp:70: DB::handle_error_code(String const&, int, bool, std::vector> const&)\\n [ 26802 ] {} BaseDaemon: 8. ./build_docker/./src/Common/Exception.cpp:111: DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x00000000206d2e61\\n [ 26802 ] {} BaseDaemon: 9. DB::Exception::Exception(PreformattedMessage&&, int) @ 0x00000000088cd1d5\\n [ 26802 ] {} BaseDaemon: 10. DB::Exception::Exception<>(int, FormatStringHelperImpl<>) @ 0x00000000088f4804\\n [ 26802 ] {} BaseDaemon: 11. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:727: DB::RemoteQueryExecutor::processReadTaskRequest() @ 0x0000000034b283cd\\n [ 26802 ] {} BaseDaemon: 12. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:623: DB::RemoteQueryExecutor::processPacket(DB::Packet) @ 0x0000000034b21488\\n [ 26802 ] {} BaseDaemon: 13. ./build_docker/./src/QueryPipeline/RemoteQueryExecutor.cpp:562: DB::RemoteQueryExecutor::readAsync() @ 0x0000000034b25dc3\\n [ 26802 ] {} BaseDaemon: 14. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:182: DB::RemoteSource::tryGenerate() @ 0x0000000040415925\\n [ 26802 ] {} BaseDaemon: 15. ./build_docker/./src/Processors/ISource.cpp:108: DB::ISource::work() @ 0x000000003f6e8571\\n [ 26802 ] {} BaseDaemon: 16. ./build_docker/./src/Processors/Sources/RemoteSource.cpp:134: DB::RemoteSource::work() @ 0x0000000040414fb0\\n [ 26802 ] {} BaseDaemon: 17.0. inlined from ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:49: DB::executeJob(DB::ExecutingGraph::Node*, DB::ReadProgressCallback*)\\n [ 26802 ] {} BaseDaemon: 17. ./build_docker/./src/Processors/Executors/ExecutionThreadContext.cpp:98: DB::ExecutionThreadContext::executeTask() @ 0x000000003f7538b1\\n [ 26802 ] {} BaseDaemon: 18. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:290: DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x000000003f71eb14\\n [ 26802 ] {} BaseDaemon: 19.0. inlined from ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:256: DB::PipelineExecutor::executeSingleThread(unsigned long)\\n [ 26802 ] {} BaseDaemon: 19. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:442: DB::PipelineExecutor::executeImpl(unsigned long, bool) @ 0x000000003f71c8c3\\n [ 26802 ] {} BaseDaemon: 20. ./build_docker/./src/Processors/Executors/PipelineExecutor.cpp:127: DB::PipelineExecutor::execute(unsigned long, bool) @ 0x000000003f71c2f5\\n [ 26802 ] {}